{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) 
) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const 
text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); 
replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\n\n Merge branch 'dashboard-environment' of github.com:rceee/split into rceee-dashboard-environment\n\n* 'dashboard-environment' of github.com:rceee/split:\n Made Rails ENV conditional on Rails environment existing using controller\n\nConflicts:\n\tlib/split/dashboard/views/layout.erb\n\n @@ -11,6 +11,7 @@\n \n
\n

Split Dashboard

\n+

<%= @current_env %>

\n
\n \n
\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge branch 'dashboard-environment' of github.com:rceee/split into rceee-dashboard-environment"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".erb"},"lang":{"kind":"string","value":"erb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675125,"cells":{"id":{"kind":"string","value":"10070775"},"text":{"kind":"string","value":" layout.erb\n \n\n\n\n\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\">\n\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\">\n\n\n\nSplit\n\n
\n

Split Dashboard

\n
\n\n
\n\n
\n <%= yield %>\n
\n\n
\n

Powered by Split v<%=Split::VERSION %>

\n
\n\n\n\n Merge branch 'dashboard-environment' of github.com:rceee/split into rceee-dashboard-environment\n\n* 'dashboard-environment' of github.com:rceee/split:\n Made Rails ENV conditional on Rails environment existing using controller\n\nConflicts:\n\tlib/split/dashboard/views/layout.erb\n\n @@ -11,6 +11,7 @@\n \n
\n

Split Dashboard

\n+

<%= @current_env %>

\n
\n \n
\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge branch 'dashboard-environment' of github.com:rceee/split into rceee-dashboard-environment"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".erb"},"lang":{"kind":"string","value":"erb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675126,"cells":{"id":{"kind":"string","value":"10070776"},"text":{"kind":"string","value":" layout.erb\n \n\n\n\n\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\">\n\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\">\n\n\n\nSplit\n\n
\n

Split Dashboard

\n
\n\n
\n\n
\n <%= yield %>\n
\n\n
\n

Powered by Split v<%=Split::VERSION %>

\n
\n\n\n\n Merge branch 'dashboard-environment' of github.com:rceee/split into rceee-dashboard-environment\n\n* 'dashboard-environment' of github.com:rceee/split:\n Made Rails ENV conditional on Rails environment existing using controller\n\nConflicts:\n\tlib/split/dashboard/views/layout.erb\n\n @@ -11,6 +11,7 @@\n \n
\n

Split Dashboard

\n+

<%= @current_env %>

\n
\n \n
\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge branch 'dashboard-environment' of github.com:rceee/split into rceee-dashboard-environment"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".erb"},"lang":{"kind":"string","value":"erb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675127,"cells":{"id":{"kind":"string","value":"10070777"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)\n\n> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires 
compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\nYou now have a Redis daemon running on port `6379`.\n\n### Setup\n\n```bash\ngem install split\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n 
ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. 
You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. 
In case you would like to start new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. 
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? 
},\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be 
called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. 
default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, 
and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first results in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. 
THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\n## Development\n\nSource hosted at [GitHub](http://github.com/splitrb/split).\nReport Issues/Feature requests on [GitHub Issues](http://github.com/splitrb/split/issues).\nDiscussion at [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\nTests can be run with `rake spec`\n\n### Note on Patches/Pull Requests\n\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. 
This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated\nfetches for fairly static data. Enabling caching will reduce this load.\n\n```ruby\nSplit.configuration.cache = true\n```\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. 
The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan Bates has produced an excellent 10 minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). 
By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n\n Clarify test running instructions\n\n @@ -700,11 +700,19 @@ Over 70 different people have contributed to the project, you can see them all h\n \n ## Development\n \n+Run the tests like this:\n+\n+ # Start a Redis server in another tab.\n+ redis-server\n+\n+ bundle\n+ rake spec\n+\n Source hosted at [GitHub](http://github.com/splitrb/split).\n+\n Report Issues/Feature requests on [GitHub Issues](http://github.com/splitrb/split/issues).\n-Discussion at [Google Groups](https://groups.google.com/d/forum/split-ruby)\n \n-Tests can be ran with `rake spec`\n+Discussion at [Google Groups](https://groups.google.com/d/forum/split-ruby).\n \n ### Note on Patches/Pull Requests\n \n"},"addition_count":{"kind":"number","value":10,"string":"10"},"commit_subject":{"kind":"string","value":"Clarify test running instructions"},"deletion_count":{"kind":"number","value":2,"string":"2"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675128,"cells":{"id":{"kind":"string","value":"10070778"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme 
compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)\n\n> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\nYou now have a Redis daemon running on port `6379`.\n\n### Setup\n\n```bash\ngem install split\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. 
Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. 
You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. 
In case you would like to start a new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. 
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? 
},\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be 
called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. 
default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, 
and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. 
THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\n## Development\n\nSource hosted at [GitHub](http://github.com/splitrb/split).\nReport Issues/Feature requests on [GitHub Issues](http://github.com/splitrb/split/issues).\nDiscussion at [Google Groups](https://groups.google.com/d/forum/split-ruby)\n\nTests can be run with `rake spec`\n\n### Note on Patches/Pull Requests\n\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. 
This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n```ruby\nSplit.configuration.cache = true\n```\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. 
The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). 
By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n\n Clarify test running instructions\n\n @@ -700,11 +700,19 @@ Over 70 different people have contributed to the project, you can see them all h\n \n ## Development\n \n+Run the tests like this:\n+\n+ # Start a Redis server in another tab.\n+ redis-server\n+\n+ bundle\n+ rake spec\n+\n Source hosted at [GitHub](http://github.com/splitrb/split).\n+\n Report Issues/Feature requests on [GitHub Issues](http://github.com/splitrb/split/issues).\n-Discussion at [Google Groups](https://groups.google.com/d/forum/split-ruby)\n \n-Tests can be ran with `rake spec`\n+Discussion at [Google Groups](https://groups.google.com/d/forum/split-ruby).\n \n ### Note on Patches/Pull Requests\n \n"},"addition_count":{"kind":"number","value":10,"string":"10"},"commit_subject":{"kind":"string","value":"Clarify test running instructions"},"deletion_count":{"kind":"number","value":2,"string":"2"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675129,"cells":{"id":{"kind":"string","value":"10070779"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme 
compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)\n\n> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\nYou now have a Redis daemon running on port `6379`.\n\n### Setup\n\n```bash\ngem install split\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. 
Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. 
You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. 
In case you would like to start a new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. 
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? 
},\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be 
called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. 
default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, 
and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. 
THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\n## Development\n\nSource hosted at [GitHub](http://github.com/splitrb/split).\nReport Issues/Feature requests on [GitHub Issues](http://github.com/splitrb/split/issues).\nDiscussion at [Google Groups](https://groups.google.com/d/forum/split-ruby)\n\nTests can be run with `rake spec`\n\n### Note on Patches/Pull Requests\n\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. 
This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n ```ruby\nSplit.configuration.cache = true\n````\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. 
The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). 
By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n
\"source_code_uri\" => \"https://github.com/splitrb/split\",\n \"bug_tracker_uri\" => \"https://github.com/splitrb/split/issues\",\n \"wiki_uri\" => \"https://github.com/splitrb/split/wiki\",\n \"mailing_list_uri\" => \"https://groups.google.com/d/forum/split-ruby\"\n }\n\n s.required_ruby_version = \">= 2.5.0\"\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency \"redis\", \">= 4.2\"\n s.add_dependency \"sinatra\", \">= 1.2.6\"\n s.add_dependency \"rubystats\", \">= 0.3.0\"\n\n s.add_development_dependency \"bundler\", \">= 1.17\"\n s.add_development_dependency \"simplecov\", \"~> 0.15\"\n s.add_development_dependency \"rack-test\", \"~> 2.0\"\n s.add_development_dependency \"rake\", \"~> 13\"\n s.add_development_dependency \"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n Added License to gemspec\n\n @@ -7,6 +7,7 @@ Gem::Specification.new do |s|\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n+ s.licenses = ['MIT']\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/andrew/split\"\n s.summary = %q{Rack based split testing framework}\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Added License to gemspec"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675131,"cells":{"id":{"kind":"string","value":"10070781"},"text":{"kind":"string","value":" split.gemspec\n # -*- encoding: utf-8 -*-\n# frozen_string_literal: true\n\n$:.push File.expand_path(\"../lib\", __FILE__)\nrequire 
\"split/version\"\n\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/andrew/split\"\n s.summary = %q{Rack based split testing framework}\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = \"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => \"https://github.com/splitrb/split\",\n \"changelog_uri\" => \"https://github.com/splitrb/split/blob/main/CHANGELOG.md\",\n \"source_code_uri\" => \"https://github.com/splitrb/split\",\n \"bug_tracker_uri\" => \"https://github.com/splitrb/split/issues\",\n \"wiki_uri\" => \"https://github.com/splitrb/split/wiki\",\n \"mailing_list_uri\" => \"https://groups.google.com/d/forum/split-ruby\"\n }\n\n s.required_ruby_version = \">= 2.5.0\"\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency \"redis\", \">= 4.2\"\n s.add_dependency \"sinatra\", \">= 1.2.6\"\n s.add_dependency \"rubystats\", \">= 0.3.0\"\n\n s.add_development_dependency \"bundler\", \">= 1.17\"\n s.add_development_dependency \"simplecov\", \"~> 0.15\"\n s.add_development_dependency \"rack-test\", \"~> 2.0\"\n s.add_development_dependency \"rake\", \"~> 13\"\n s.add_development_dependency \"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n Added License to gemspec\n\n @@ -7,6 +7,7 @@ Gem::Specification.new do |s|\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n+ s.licenses = ['MIT']\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/andrew/split\"\n s.summary = %q{Rack based split testing 
framework}\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Added License to gemspec"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675132,"cells":{"id":{"kind":"string","value":"10070782"},"text":{"kind":"string","value":" split.gemspec\n # -*- encoding: utf-8 -*-\n# frozen_string_literal: true\n\n$:.push File.expand_path(\"../lib\", __FILE__)\nrequire \"split/version\"\n\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/andrew/split\"\n s.summary = %q{Rack based split testing framework}\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = \"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => \"https://github.com/splitrb/split\",\n \"changelog_uri\" => \"https://github.com/splitrb/split/blob/main/CHANGELOG.md\",\n \"source_code_uri\" => \"https://github.com/splitrb/split\",\n \"bug_tracker_uri\" => \"https://github.com/splitrb/split/issues\",\n \"wiki_uri\" => \"https://github.com/splitrb/split/wiki\",\n \"mailing_list_uri\" => \"https://groups.google.com/d/forum/split-ruby\"\n }\n\n s.required_ruby_version = \">= 2.5.0\"\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency \"redis\", \">= 4.2\"\n s.add_dependency \"sinatra\", \">= 1.2.6\"\n s.add_dependency \"rubystats\", \">= 0.3.0\"\n\n s.add_development_dependency \"bundler\", \">= 1.17\"\n s.add_development_dependency \"simplecov\", \"~> 0.15\"\n s.add_development_dependency \"rack-test\", \"~> 2.0\"\n 
s.add_development_dependency \"rake\", \"~> 13\"\n s.add_development_dependency \"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n Added License to gemspec\n\n @@ -7,6 +7,7 @@ Gem::Specification.new do |s|\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n+ s.licenses = ['MIT']\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/andrew/split\"\n s.summary = %q{Rack based split testing framework}\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Added License to gemspec"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675133,"cells":{"id":{"kind":"string","value":"10070783"},"text":{"kind":"string","value":" settings.py\n from conf.default import *\nimport os\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nLOCAL_DEVELOPMENT = True\n\nif LOCAL_DEVELOPMENT:\n import sys\n sys.path.append(os.path.dirname(__file__))\n\n# if you're sloppy.\nDJANGOPYPI_ALLOW_VERSION_OVERWRITE = False\nDJANGOPYPI_RELEASE_UPLOAD_TO = 'dists'\nLOCAL_DEVELOPMENT=True\n\n# change to False if you do not want Django's default server to serve static pages\nLOCAL_DEVELOPMENT = True\nDATABASE_ENGINE = 'sqlite3'\nDATABASE_NAME = os.path.join(here, 'devdatabase.db')\nDATABASE_USER = ''\nDATABASE_PASSWORD = ''\nDATABASE_HOST = ''\nDATABASE_PORT = ''\n\n syncing with ask repos\n\n @@ -12,7 +12,6 @@ ADMINS = (\n # if you're sloppy.\n DJANGOPYPI_ALLOW_VERSION_OVERWRITE = False\n DJANGOPYPI_RELEASE_UPLOAD_TO = 'dists'\n-LOCAL_DEVELOPMENT=True\n \n # change to False if you do not want Django's default server to serve static pages\n LOCAL_DEVELOPMENT = 
True\n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"syncing with ask repos"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".py"},"lang":{"kind":"string","value":"py"},"license":{"kind":"string","value":"bsd-3-clause"},"repo_name":{"kind":"string","value":"ask/chishop"}}},{"rowIdx":10675134,"cells":{"id":{"kind":"string","value":"10070784"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)\n\n> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions. 
You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\nYou now have a Redis daemon running on port `6379`.\n\n### Setup\n\n```bash\ngem install split\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a 
view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. 
You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. 
In case you would like to start a new test
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? 
},\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be 
called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use `variable_size_secure_compare` to stop length information leaking\n ActiveSupport::SecurityUtils.variable_size_secure_compare(username, ENV[\"SPLIT_USERNAME\"]) &\n ActiveSupport::SecurityUtils.variable_size_secure_compare(password, ENV[\"SPLIT_PASSWORD\"])\nend\n\n# Apps without activesupport\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. 
default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, 
and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. 
THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments\n```ruby\n ab_combined_test(:button_color_experiment)\n```\nFinish each combined test as normal\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```\n helpers Split::CombinedExperimentsHelper\n ```\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. 
This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n ```ruby\nSplit.configuration.cache = true\n````\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. 
The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). 
By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n\n Avoid variable_size_secure_compare private method (#465)\n\n\n @@ -439,9 +439,9 @@ You may want to password protect that page, you can do so with `Rack::Auth::Basi\n Split::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n- # - Use `variable_size_secure_compare` to stop length information leaking\n- ActiveSupport::SecurityUtils.variable_size_secure_compare(username, ENV[\"SPLIT_USERNAME\"]) &\n- ActiveSupport::SecurityUtils.variable_size_secure_compare(password, ENV[\"SPLIT_PASSWORD\"])\n+ # - Use digests to stop length information leaking\n+ ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n+ ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\n end\n \n # Apps without activesupport\n"},"addition_count":{"kind":"number","value":3,"string":"3"},"commit_subject":{"kind":"string","value":"Avoid variable_size_secure_compare private method (#465)"},"deletion_count":{"kind":"number","value":3,"string":"3"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675135,"cells":{"id":{"kind":"string","value":"10070785"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code 
Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)\n\n> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\nYou now have a Redis daemon running on port `6379`.\n\n### Setup\n\n```bash\ngem install split\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. 
Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. 
You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. 
In case you would like to start a new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. 
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, set the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? 
},\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be 
called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use `variable_size_secure_compare` to stop length information leaking\n ActiveSupport::SecurityUtils.variable_size_secure_compare(username, ENV[\"SPLIT_USERNAME\"]) &\n ActiveSupport::SecurityUtils.variable_size_secure_compare(password, ENV[\"SPLIT_PASSWORD\"])\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. 
default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, 
and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. 
THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments\n```ruby\n ab_combined_test(:button_color_experiment)\n```\nFinish each combined test as normal\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```\n helpers Split::CombinedExperimentsHelper\n ```\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. 
This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n```ruby\nSplit.configuration.cache = true\n```\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. 
The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). 
By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n
Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)\n\n> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\nYou now have a Redis daemon running on port `6379`.\n\n### Setup\n\n```bash\ngem install split\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. 
Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. 
You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. 
In case you would like to start new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. 
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? 
},\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be 
called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use `variable_size_secure_compare` to stop length information leaking\n ActiveSupport::SecurityUtils.variable_size_secure_compare(username, ENV[\"SPLIT_USERNAME\"]) &\n ActiveSupport::SecurityUtils.variable_size_secure_compare(password, ENV[\"SPLIT_PASSWORD\"])\nend\n\n# Apps without activesupport\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. 
default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, 
and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. 
THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments\n```ruby\n ab_combined_test(:button_color_experiment)\n```\nFinish each combined test as normal\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```\n helpers Split::CombinedExperimentsHelper\n ```\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. 
This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n ```ruby\nSplit.configuration.cache = true\n````\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. 
The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). 
By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n\n Avoid variable_size_secure_compare private method (#465)\n\n\n @@ -439,9 +439,9 @@ You may want to password protect that page, you can do so with `Rack::Auth::Basi\n Split::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n- # - Use `variable_size_secure_compare` to stop length information leaking\n- ActiveSupport::SecurityUtils.variable_size_secure_compare(username, ENV[\"SPLIT_USERNAME\"]) &\n- ActiveSupport::SecurityUtils.variable_size_secure_compare(password, ENV[\"SPLIT_PASSWORD\"])\n+ # - Use digests to stop length information leaking\n+ ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n+ ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\n end\n \n # Apps without activesupport\n"},"addition_count":{"kind":"number","value":3,"string":"3"},"commit_subject":{"kind":"string","value":"Avoid variable_size_secure_compare private method (#465)"},"deletion_count":{"kind":"number","value":3,"string":"3"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675137,"cells":{"id":{"kind":"string","value":"10070787"},"text":{"kind":"string","value":" configuration.rb\n # frozen_string_literal: true\n\nmodule Split\n class Configuration\n attr_accessor :ignore_ip_addresses\n attr_accessor :ignore_filter\n attr_accessor :db_failover\n attr_accessor :db_failover_on_db_error\n attr_accessor :db_failover_allow_parameter_override\n attr_accessor :allow_multiple_experiments\n attr_accessor :enabled\n 
attr_accessor :persistence\n attr_accessor :persistence_cookie_length\n attr_accessor :persistence_cookie_domain\n attr_accessor :algorithm\n attr_accessor :store_override\n attr_accessor :start_manually\n attr_accessor :reset_manually\n attr_accessor :on_trial\n attr_accessor :on_trial_choose\n attr_accessor :on_trial_complete\n attr_accessor :on_experiment_reset\n attr_accessor :on_experiment_delete\n attr_accessor :on_before_experiment_reset\n attr_accessor :on_experiment_winner_choose\n attr_accessor :on_before_experiment_delete\n attr_accessor :include_rails_helper\n attr_accessor :beta_probability_simulations\n attr_accessor :winning_alternative_recalculation_interval\n attr_accessor :redis\n attr_accessor :dashboard_pagination_default_per_page\n attr_accessor :cache\n\n attr_reader :experiments\n\n attr_writer :bots\n attr_writer :robot_regex\n\n def bots\n @bots ||= {\n # Indexers\n \"AdsBot-Google\" => \"Google Adwords\",\n \"Baidu\" => \"Chinese search engine\",\n \"Baiduspider\" => \"Chinese search engine\",\n \"bingbot\" => \"Microsoft bing bot\",\n \"Butterfly\" => \"Topsy Labs\",\n \"Gigabot\" => \"Gigabot spider\",\n \"Googlebot\" => \"Google spider\",\n \"MJ12bot\" => \"Majestic-12 spider\",\n \"msnbot\" => \"Microsoft bot\",\n \"rogerbot\" => \"SeoMoz spider\",\n \"PaperLiBot\" => \"PaperLi is another content curation service\",\n \"Slurp\" => \"Yahoo spider\",\n \"Sogou\" => \"Chinese search engine\",\n \"spider\" => \"generic web spider\",\n \"UnwindFetchor\" => \"Gnip crawler\",\n \"WordPress\" => \"WordPress spider\",\n \"YandexAccessibilityBot\" => \"Yandex accessibility spider\",\n \"YandexBot\" => \"Yandex spider\",\n \"YandexMobileBot\" => \"Yandex mobile spider\",\n \"ZIBB\" => \"ZIBB spider\",\n\n # HTTP libraries\n \"Apache-HttpClient\" => \"Java http library\",\n \"AppEngine-Google\" => \"Google App Engine\",\n \"curl\" => \"curl unix CLI http client\",\n \"ColdFusion\" => \"ColdFusion http library\",\n \"EventMachine HttpClient\" => 
\"Ruby http library\",\n \"Go http package\" => \"Go http library\",\n \"Go-http-client\" => \"Go http library\",\n \"Java\" => \"Generic Java http library\",\n \"libwww-perl\" => \"Perl client-server library loved by script kids\",\n \"lwp-trivial\" => \"Another Perl library loved by script kids\",\n \"Python-urllib\" => \"Python http library\",\n \"PycURL\" => \"Python http library\",\n \"Test Certificate Info\" => \"C http library?\",\n \"Typhoeus\" => \"Ruby http library\",\n \"Wget\" => \"wget unix CLI http client\",\n\n # URL expanders / previewers\n \"awe.sm\" => \"Awe.sm URL expander\",\n \"bitlybot\" => \"bit.ly bot\",\n \"bot@linkfluence.net\" => \"Linkfluence bot\",\n \"facebookexternalhit\" => \"facebook bot\",\n \"Facebot\" => \"Facebook crawler\",\n \"Feedfetcher-Google\" => \"Google Feedfetcher\",\n \"https://developers.google.com/+/web/snippet\" => \"Google+ Snippet Fetcher\",\n \"LinkedInBot\" => \"LinkedIn bot\",\n \"LongURL\" => \"URL expander service\",\n \"NING\" => \"NING - Yet Another Twitter Swarmer\",\n \"Pinterestbot\" => \"Pinterest Bot\",\n \"redditbot\" => \"Reddit Bot\",\n \"ShortLinkTranslate\" => \"Link shortener\",\n \"Slackbot\" => \"Slackbot link expander\",\n \"TweetmemeBot\" => \"TweetMeMe Crawler\",\n \"Twitterbot\" => \"Twitter URL expander\",\n \"UnwindFetch\" => \"Gnip URL expander\",\n \"vkShare\" => \"VKontake Sharer\",\n\n # Uptime monitoring\n \"check_http\" => \"Nagios monitor\",\n \"GoogleStackdriverMonitoring\" => \"Google Cloud monitor\",\n \"NewRelicPinger\" => \"NewRelic monitor\",\n \"Panopta\" => \"Monitoring service\",\n \"Pingdom\" => \"Pingdom monitoring\",\n \"SiteUptime\" => \"Site monitoring services\",\n \"UptimeRobot\" => \"Monitoring service\",\n\n # ???\n \"DigitalPersona Fingerprint Software\" => \"HP Fingerprint scanner\",\n self.experiments.each do |key, value|\n metrics = value_for(value, :metric) rescue nil\n Array(metrics).each do |metric_name|\n if metric_name.to_sym\n @metrics[metric_name] ||= 
[]\n @metrics[metric_name] << Split::Experiment.new(key)\n end\n end\n end\n end\n\n def disabled?\n !enabled\n end\n\n def experiment_for(name)\n if normalized_experiments\n # TODO symbols\n normalized_experiments[name.to_sym]\n end\n end\n\n def metrics\n return @metrics if defined?(@metrics)\n @metrics = {}\n if self.experiments\n self.experiments.each do |key, value|\n metrics = value_for(value, :metric) rescue nil\n Array(metrics).each do |metric_name|\n if metric_name\n @metrics[metric_name.to_sym] ||= []\n @metrics[metric_name.to_sym] << Split::Experiment.new(key)\n end\n end\n end\n end\n @metrics\n end\n\n def normalized_experiments\n return nil if @experiments.nil?\n\n experiment_config = {}\n @experiments.keys.each do |name|\n experiment_config[name.to_sym] = {}\n end\n\n @experiments.each do |experiment_name, settings|\n alternatives = if (alts = value_for(settings, :alternatives))\n normalize_alternatives(alts)\n end\n\n experiment_data = {\n alternatives: alternatives,\n goals: value_for(settings, :goals),\n metadata: value_for(settings, :metadata),\n algorithm: value_for(settings, :algorithm),\n resettable: value_for(settings, :resettable)\n }\n\n experiment_data.each do |name, value|\n experiment_config[experiment_name.to_sym][name] = value if value != nil\n end\n end\n\n experiment_config\n end\n\n def normalize_alternatives(alternatives)\n given_probability, num_with_probability = alternatives.inject([0, 0]) do |a, v|\n p, n = a\n if percent = value_for(v, :percent)\n [p + percent, n + 1]\n else\n a\n end\n end\n\n num_without_probability = alternatives.length - num_with_probability\n unassigned_probability = ((100.0 - given_probability) / num_without_probability / 100.0)\n\n if num_with_probability.nonzero?\n alternatives = alternatives.map do |v|\n if (name = value_for(v, :name)) && (percent = value_for(v, :percent))\n { name => percent / 100.0 }\n elsif name = value_for(v, :name)\n { name => unassigned_probability }\n else\n { v => 
unassigned_probability }\n end\n end\n\n [alternatives.shift, alternatives]\n else\n alternatives = alternatives.dup\n [alternatives.shift, alternatives]\n end\n end\n\n def robot_regex\n @robot_regex ||= /\\b(?:#{escaped_bots.join('|')})\\b|\\A\\W*\\z/i\n end\n\n def initialize\n @ignore_ip_addresses = []\n @ignore_filter = proc { |request| is_robot? || is_ignored_ip_address? }\n @db_failover = false\n @db_failover_on_db_error = proc { |error| } # e.g. use Rails logger here\n @on_experiment_reset = proc { |experiment| }\n @on_experiment_delete = proc { |experiment| }\n @on_before_experiment_reset = proc { |experiment| }\n @on_before_experiment_delete = proc { |experiment| }\n @on_experiment_winner_choose = proc { |experiment| }\n @db_failover_allow_parameter_override = false\n @allow_multiple_experiments = false\n @enabled = true\n @experiments = {}\n @persistence = Split::Persistence::SessionAdapter\n @persistence_cookie_length = 31536000 # One year from now\n @persistence_cookie_domain = nil\n @algorithm = Split::Algorithms::WeightedSample\n @include_rails_helper = true\n @beta_probability_simulations = 10000\n @winning_alternative_recalculation_interval = 60 * 60 * 24 # 1 day\n @redis = ENV.fetch(ENV.fetch(\"REDIS_PROVIDER\", \"REDIS_URL\"), \"redis://localhost:6379\")\n @dashboard_pagination_default_per_page = 10\n end\n\n private\n def value_for(hash, key)\n if hash.kind_of?(Hash)\n hash.has_key?(key.to_s) ? 
hash[key.to_s] : hash[key.to_sym]\n end\n end\n\n def escaped_bots\n bots.map { |key, _| Regexp.escape(key) }\n end\n end\nend\n\n fix to pass spec\n\n @@ -111,9 +111,9 @@ module Split\n self.experiments.each do |key, value|\n metrics = value_for(value, :metric) rescue nil\n Array(metrics).each do |metric_name|\n- if metric_name.to_sym\n- @metrics[metric_name] ||= []\n- @metrics[metric_name] << Split::Experiment.new(key)\n+ if metric_name\n+ @metrics[metric_name.to_sym] ||= []\n+ @metrics[metric_name.to_sym] << Split::Experiment.new(key)\n end\n end\n end\n"},"addition_count":{"kind":"number","value":3,"string":"3"},"commit_subject":{"kind":"string","value":"fix to pass spec"},"deletion_count":{"kind":"number","value":3,"string":"3"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675138,"cells":{"id":{"kind":"string","value":"10070788"},"text":{"kind":"string","value":" configuration.rb\n # frozen_string_literal: true\n\nmodule Split\n class Configuration\n attr_accessor :ignore_ip_addresses\n attr_accessor :ignore_filter\n attr_accessor :db_failover\n attr_accessor :db_failover_on_db_error\n attr_accessor :db_failover_allow_parameter_override\n attr_accessor :allow_multiple_experiments\n attr_accessor :enabled\n attr_accessor :persistence\n attr_accessor :persistence_cookie_length\n attr_accessor :persistence_cookie_domain\n attr_accessor :algorithm\n attr_accessor :store_override\n attr_accessor :start_manually\n attr_accessor :reset_manually\n attr_accessor :on_trial\n attr_accessor :on_trial_choose\n attr_accessor :on_trial_complete\n attr_accessor :on_experiment_reset\n attr_accessor :on_experiment_delete\n attr_accessor :on_before_experiment_reset\n attr_accessor :on_experiment_winner_choose\n attr_accessor :on_before_experiment_delete\n attr_accessor :include_rails_helper\n attr_accessor 
:beta_probability_simulations\n attr_accessor :winning_alternative_recalculation_interval\n attr_accessor :redis\n attr_accessor :dashboard_pagination_default_per_page\n attr_accessor :cache\n\n attr_reader :experiments\n\n attr_writer :bots\n attr_writer :robot_regex\n\n def bots\n @bots ||= {\n # Indexers\n \"AdsBot-Google\" => \"Google Adwords\",\n \"Baidu\" => \"Chinese search engine\",\n \"Baiduspider\" => \"Chinese search engine\",\n \"bingbot\" => \"Microsoft bing bot\",\n \"Butterfly\" => \"Topsy Labs\",\n \"Gigabot\" => \"Gigabot spider\",\n \"Googlebot\" => \"Google spider\",\n \"MJ12bot\" => \"Majestic-12 spider\",\n \"msnbot\" => \"Microsoft bot\",\n \"rogerbot\" => \"SeoMoz spider\",\n \"PaperLiBot\" => \"PaperLi is another content curation service\",\n \"Slurp\" => \"Yahoo spider\",\n \"Sogou\" => \"Chinese search engine\",\n \"spider\" => \"generic web spider\",\n \"UnwindFetchor\" => \"Gnip crawler\",\n \"WordPress\" => \"WordPress spider\",\n \"YandexAccessibilityBot\" => \"Yandex accessibility spider\",\n \"YandexBot\" => \"Yandex spider\",\n \"YandexMobileBot\" => \"Yandex mobile spider\",\n \"ZIBB\" => \"ZIBB spider\",\n\n # HTTP libraries\n \"Apache-HttpClient\" => \"Java http library\",\n \"AppEngine-Google\" => \"Google App Engine\",\n \"curl\" => \"curl unix CLI http client\",\n \"ColdFusion\" => \"ColdFusion http library\",\n \"EventMachine HttpClient\" => \"Ruby http library\",\n \"Go http package\" => \"Go http library\",\n \"Go-http-client\" => \"Go http library\",\n \"Java\" => \"Generic Java http library\",\n \"libwww-perl\" => \"Perl client-server library loved by script kids\",\n \"lwp-trivial\" => \"Another Perl library loved by script kids\",\n \"Python-urllib\" => \"Python http library\",\n \"PycURL\" => \"Python http library\",\n \"Test Certificate Info\" => \"C http library?\",\n \"Typhoeus\" => \"Ruby http library\",\n \"Wget\" => \"wget unix CLI http client\",\n\n # URL expanders / previewers\n \"awe.sm\" => \"Awe.sm URL 
expander\",\n \"bitlybot\" => \"bit.ly bot\",\n \"bot@linkfluence.net\" => \"Linkfluence bot\",\n \"facebookexternalhit\" => \"facebook bot\",\n \"Facebot\" => \"Facebook crawler\",\n \"Feedfetcher-Google\" => \"Google Feedfetcher\",\n \"https://developers.google.com/+/web/snippet\" => \"Google+ Snippet Fetcher\",\n \"LinkedInBot\" => \"LinkedIn bot\",\n \"LongURL\" => \"URL expander service\",\n \"NING\" => \"NING - Yet Another Twitter Swarmer\",\n \"Pinterestbot\" => \"Pinterest Bot\",\n \"redditbot\" => \"Reddit Bot\",\n \"ShortLinkTranslate\" => \"Link shortener\",\n \"Slackbot\" => \"Slackbot link expander\",\n \"TweetmemeBot\" => \"TweetMeMe Crawler\",\n \"Twitterbot\" => \"Twitter URL expander\",\n \"UnwindFetch\" => \"Gnip URL expander\",\n \"vkShare\" => \"VKontake Sharer\",\n\n # Uptime monitoring\n \"check_http\" => \"Nagios monitor\",\n \"GoogleStackdriverMonitoring\" => \"Google Cloud monitor\",\n \"NewRelicPinger\" => \"NewRelic monitor\",\n \"Panopta\" => \"Monitoring service\",\n \"Pingdom\" => \"Pingdom monitoring\",\n \"SiteUptime\" => \"Site monitoring services\",\n \"UptimeRobot\" => \"Monitoring service\",\n\n # ???\n \"DigitalPersona Fingerprint Software\" => \"HP Fingerprint scanner\",\n self.experiments.each do |key, value|\n metrics = value_for(value, :metric) rescue nil\n Array(metrics).each do |metric_name|\n if metric_name.to_sym\n @metrics[metric_name] ||= []\n @metrics[metric_name] << Split::Experiment.new(key)\n end\n end\n end\n end\n\n def disabled?\n !enabled\n end\n\n def experiment_for(name)\n if normalized_experiments\n # TODO symbols\n normalized_experiments[name.to_sym]\n end\n end\n\n def metrics\n return @metrics if defined?(@metrics)\n @metrics = {}\n if self.experiments\n self.experiments.each do |key, value|\n metrics = value_for(value, :metric) rescue nil\n Array(metrics).each do |metric_name|\n if metric_name\n @metrics[metric_name.to_sym] ||= []\n @metrics[metric_name.to_sym] << Split::Experiment.new(key)\n end\n end\n 
end\n end\n @metrics\n end\n\n def normalized_experiments\n return nil if @experiments.nil?\n\n experiment_config = {}\n @experiments.keys.each do |name|\n experiment_config[name.to_sym] = {}\n end\n\n @experiments.each do |experiment_name, settings|\n alternatives = if (alts = value_for(settings, :alternatives))\n normalize_alternatives(alts)\n end\n\n experiment_data = {\n alternatives: alternatives,\n goals: value_for(settings, :goals),\n metadata: value_for(settings, :metadata),\n algorithm: value_for(settings, :algorithm),\n resettable: value_for(settings, :resettable)\n }\n\n experiment_data.each do |name, value|\n experiment_config[experiment_name.to_sym][name] = value if value != nil\n end\n end\n\n experiment_config\n end\n\n def normalize_alternatives(alternatives)\n given_probability, num_with_probability = alternatives.inject([0, 0]) do |a, v|\n p, n = a\n if percent = value_for(v, :percent)\n [p + percent, n + 1]\n else\n a\n end\n end\n\n num_without_probability = alternatives.length - num_with_probability\n unassigned_probability = ((100.0 - given_probability) / num_without_probability / 100.0)\n\n if num_with_probability.nonzero?\n alternatives = alternatives.map do |v|\n if (name = value_for(v, :name)) && (percent = value_for(v, :percent))\n { name => percent / 100.0 }\n elsif name = value_for(v, :name)\n { name => unassigned_probability }\n else\n { v => unassigned_probability }\n end\n end\n\n [alternatives.shift, alternatives]\n else\n alternatives = alternatives.dup\n [alternatives.shift, alternatives]\n end\n end\n\n def robot_regex\n @robot_regex ||= /\\b(?:#{escaped_bots.join('|')})\\b|\\A\\W*\\z/i\n end\n\n def initialize\n @ignore_ip_addresses = []\n @ignore_filter = proc { |request| is_robot? || is_ignored_ip_address? }\n @db_failover = false\n @db_failover_on_db_error = proc { |error| } # e.g. 
use Rails logger here\n @on_experiment_reset = proc { |experiment| }\n @on_experiment_delete = proc { |experiment| }\n @on_before_experiment_reset = proc { |experiment| }\n @on_before_experiment_delete = proc { |experiment| }\n @on_experiment_winner_choose = proc { |experiment| }\n @db_failover_allow_parameter_override = false\n @allow_multiple_experiments = false\n @enabled = true\n @experiments = {}\n @persistence = Split::Persistence::SessionAdapter\n @persistence_cookie_length = 31536000 # One year from now\n @persistence_cookie_domain = nil\n @algorithm = Split::Algorithms::WeightedSample\n @include_rails_helper = true\n @beta_probability_simulations = 10000\n @winning_alternative_recalculation_interval = 60 * 60 * 24 # 1 day\n @redis = ENV.fetch(ENV.fetch(\"REDIS_PROVIDER\", \"REDIS_URL\"), \"redis://localhost:6379\")\n @dashboard_pagination_default_per_page = 10\n end\n\n private\n def value_for(hash, key)\n if hash.kind_of?(Hash)\n hash.has_key?(key.to_s) ? hash[key.to_s] : hash[key.to_sym]\n end\n end\n\n def escaped_bots\n bots.map { |key, _| Regexp.escape(key) }\n end\n end\nend\n\n fix to pass spec\n\n @@ -111,9 +111,9 @@ module Split\n self.experiments.each do |key, value|\n metrics = value_for(value, :metric) rescue nil\n Array(metrics).each do |metric_name|\n- if metric_name.to_sym\n- @metrics[metric_name] ||= []\n- @metrics[metric_name] << Split::Experiment.new(key)\n+ if metric_name\n+ @metrics[metric_name.to_sym] ||= []\n+ @metrics[metric_name.to_sym] << Split::Experiment.new(key)\n end\n end\n end\n"},"addition_count":{"kind":"number","value":3,"string":"3"},"commit_subject":{"kind":"string","value":"fix to pass 
spec"},"deletion_count":{"kind":"number","value":3,"string":"3"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675139,"cells":{"id":{"kind":"string","value":"10070789"},"text":{"kind":"string","value":" configuration.rb\n # frozen_string_literal: true\n\nmodule Split\n class Configuration\n attr_accessor :ignore_ip_addresses\n attr_accessor :ignore_filter\n attr_accessor :db_failover\n attr_accessor :db_failover_on_db_error\n attr_accessor :db_failover_allow_parameter_override\n attr_accessor :allow_multiple_experiments\n attr_accessor :enabled\n attr_accessor :persistence\n attr_accessor :persistence_cookie_length\n attr_accessor :persistence_cookie_domain\n attr_accessor :algorithm\n attr_accessor :store_override\n attr_accessor :start_manually\n attr_accessor :reset_manually\n attr_accessor :on_trial\n attr_accessor :on_trial_choose\n attr_accessor :on_trial_complete\n attr_accessor :on_experiment_reset\n attr_accessor :on_experiment_delete\n attr_accessor :on_before_experiment_reset\n attr_accessor :on_experiment_winner_choose\n attr_accessor :on_before_experiment_delete\n attr_accessor :include_rails_helper\n attr_accessor :beta_probability_simulations\n attr_accessor :winning_alternative_recalculation_interval\n attr_accessor :redis\n attr_accessor :dashboard_pagination_default_per_page\n attr_accessor :cache\n\n attr_reader :experiments\n\n attr_writer :bots\n attr_writer :robot_regex\n\n def bots\n @bots ||= {\n # Indexers\n \"AdsBot-Google\" => \"Google Adwords\",\n \"Baidu\" => \"Chinese search engine\",\n \"Baiduspider\" => \"Chinese search engine\",\n \"bingbot\" => \"Microsoft bing bot\",\n \"Butterfly\" => \"Topsy Labs\",\n \"Gigabot\" => \"Gigabot spider\",\n \"Googlebot\" => \"Google spider\",\n \"MJ12bot\" => \"Majestic-12 spider\",\n \"msnbot\" => \"Microsoft bot\",\n \"rogerbot\" => \"SeoMoz 
spider\",\n \"PaperLiBot\" => \"PaperLi is another content curation service\",\n \"Slurp\" => \"Yahoo spider\",\n \"Sogou\" => \"Chinese search engine\",\n \"spider\" => \"generic web spider\",\n \"UnwindFetchor\" => \"Gnip crawler\",\n \"WordPress\" => \"WordPress spider\",\n \"YandexAccessibilityBot\" => \"Yandex accessibility spider\",\n \"YandexBot\" => \"Yandex spider\",\n \"YandexMobileBot\" => \"Yandex mobile spider\",\n \"ZIBB\" => \"ZIBB spider\",\n\n # HTTP libraries\n \"Apache-HttpClient\" => \"Java http library\",\n \"AppEngine-Google\" => \"Google App Engine\",\n \"curl\" => \"curl unix CLI http client\",\n \"ColdFusion\" => \"ColdFusion http library\",\n \"EventMachine HttpClient\" => \"Ruby http library\",\n \"Go http package\" => \"Go http library\",\n \"Go-http-client\" => \"Go http library\",\n \"Java\" => \"Generic Java http library\",\n \"libwww-perl\" => \"Perl client-server library loved by script kids\",\n \"lwp-trivial\" => \"Another Perl library loved by script kids\",\n \"Python-urllib\" => \"Python http library\",\n \"PycURL\" => \"Python http library\",\n \"Test Certificate Info\" => \"C http library?\",\n \"Typhoeus\" => \"Ruby http library\",\n \"Wget\" => \"wget unix CLI http client\",\n\n # URL expanders / previewers\n \"awe.sm\" => \"Awe.sm URL expander\",\n \"bitlybot\" => \"bit.ly bot\",\n \"bot@linkfluence.net\" => \"Linkfluence bot\",\n \"facebookexternalhit\" => \"facebook bot\",\n \"Facebot\" => \"Facebook crawler\",\n \"Feedfetcher-Google\" => \"Google Feedfetcher\",\n \"https://developers.google.com/+/web/snippet\" => \"Google+ Snippet Fetcher\",\n \"LinkedInBot\" => \"LinkedIn bot\",\n \"LongURL\" => \"URL expander service\",\n \"NING\" => \"NING - Yet Another Twitter Swarmer\",\n \"Pinterestbot\" => \"Pinterest Bot\",\n \"redditbot\" => \"Reddit Bot\",\n \"ShortLinkTranslate\" => \"Link shortener\",\n \"Slackbot\" => \"Slackbot link expander\",\n \"TweetmemeBot\" => \"TweetMeMe Crawler\",\n \"Twitterbot\" => \"Twitter URL 
expander\",\n \"UnwindFetch\" => \"Gnip URL expander\",\n \"vkShare\" => \"VKontake Sharer\",\n\n # Uptime monitoring\n \"check_http\" => \"Nagios monitor\",\n \"GoogleStackdriverMonitoring\" => \"Google Cloud monitor\",\n \"NewRelicPinger\" => \"NewRelic monitor\",\n \"Panopta\" => \"Monitoring service\",\n \"Pingdom\" => \"Pingdom monitoring\",\n \"SiteUptime\" => \"Site monitoring services\",\n \"UptimeRobot\" => \"Monitoring service\",\n\n # ???\n \"DigitalPersona Fingerprint Software\" => \"HP Fingerprint scanner\",\n self.experiments.each do |key, value|\n metrics = value_for(value, :metric) rescue nil\n Array(metrics).each do |metric_name|\n if metric_name.to_sym\n @metrics[metric_name] ||= []\n @metrics[metric_name] << Split::Experiment.new(key)\n end\n end\n end\n end\n\n def disabled?\n !enabled\n end\n\n def experiment_for(name)\n if normalized_experiments\n # TODO symbols\n normalized_experiments[name.to_sym]\n end\n end\n\n def metrics\n return @metrics if defined?(@metrics)\n @metrics = {}\n if self.experiments\n self.experiments.each do |key, value|\n metrics = value_for(value, :metric) rescue nil\n Array(metrics).each do |metric_name|\n if metric_name\n @metrics[metric_name.to_sym] ||= []\n @metrics[metric_name.to_sym] << Split::Experiment.new(key)\n end\n end\n end\n end\n @metrics\n end\n\n def normalized_experiments\n return nil if @experiments.nil?\n\n experiment_config = {}\n @experiments.keys.each do |name|\n experiment_config[name.to_sym] = {}\n end\n\n @experiments.each do |experiment_name, settings|\n alternatives = if (alts = value_for(settings, :alternatives))\n normalize_alternatives(alts)\n end\n\n experiment_data = {\n alternatives: alternatives,\n goals: value_for(settings, :goals),\n metadata: value_for(settings, :metadata),\n algorithm: value_for(settings, :algorithm),\n resettable: value_for(settings, :resettable)\n }\n\n experiment_data.each do |name, value|\n experiment_config[experiment_name.to_sym][name] = value if value != 
nil\n end\n end\n\n experiment_config\n end\n\n def normalize_alternatives(alternatives)\n given_probability, num_with_probability = alternatives.inject([0, 0]) do |a, v|\n p, n = a\n if percent = value_for(v, :percent)\n [p + percent, n + 1]\n else\n a\n end\n end\n\n num_without_probability = alternatives.length - num_with_probability\n unassigned_probability = ((100.0 - given_probability) / num_without_probability / 100.0)\n\n if num_with_probability.nonzero?\n alternatives = alternatives.map do |v|\n if (name = value_for(v, :name)) && (percent = value_for(v, :percent))\n { name => percent / 100.0 }\n elsif name = value_for(v, :name)\n { name => unassigned_probability }\n else\n { v => unassigned_probability }\n end\n end\n\n [alternatives.shift, alternatives]\n else\n alternatives = alternatives.dup\n [alternatives.shift, alternatives]\n end\n end\n\n def robot_regex\n @robot_regex ||= /\\b(?:#{escaped_bots.join('|')})\\b|\\A\\W*\\z/i\n end\n\n def initialize\n @ignore_ip_addresses = []\n @ignore_filter = proc { |request| is_robot? || is_ignored_ip_address? }\n @db_failover = false\n @db_failover_on_db_error = proc { |error| } # e.g. 
use Rails logger here\n @on_experiment_reset = proc { |experiment| }\n @on_experiment_delete = proc { |experiment| }\n @on_before_experiment_reset = proc { |experiment| }\n @on_before_experiment_delete = proc { |experiment| }\n @on_experiment_winner_choose = proc { |experiment| }\n @db_failover_allow_parameter_override = false\n @allow_multiple_experiments = false\n @enabled = true\n @experiments = {}\n @persistence = Split::Persistence::SessionAdapter\n @persistence_cookie_length = 31536000 # One year from now\n @persistence_cookie_domain = nil\n @algorithm = Split::Algorithms::WeightedSample\n @include_rails_helper = true\n @beta_probability_simulations = 10000\n @winning_alternative_recalculation_interval = 60 * 60 * 24 # 1 day\n @redis = ENV.fetch(ENV.fetch(\"REDIS_PROVIDER\", \"REDIS_URL\"), \"redis://localhost:6379\")\n @dashboard_pagination_default_per_page = 10\n end\n\n private\n def value_for(hash, key)\n if hash.kind_of?(Hash)\n hash.has_key?(key.to_s) ? hash[key.to_s] : hash[key.to_sym]\n end\n end\n\n def escaped_bots\n bots.map { |key, _| Regexp.escape(key) }\n end\n end\nend\n\n fix to pass spec\n\n @@ -111,9 +111,9 @@ module Split\n self.experiments.each do |key, value|\n metrics = value_for(value, :metric) rescue nil\n Array(metrics).each do |metric_name|\n- if metric_name.to_sym\n- @metrics[metric_name] ||= []\n- @metrics[metric_name] << Split::Experiment.new(key)\n+ if metric_name\n+ @metrics[metric_name.to_sym] ||= []\n+ @metrics[metric_name.to_sym] << Split::Experiment.new(key)\n end\n end\n end\n"},"addition_count":{"kind":"number","value":3,"string":"3"},"commit_subject":{"kind":"string","value":"fix to pass 
spec"},"deletion_count":{"kind":"number","value":3,"string":"3"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675140,"cells":{"id":{"kind":"string","value":"10070790"},"text":{"kind":"string","value":" helper.rb\n # frozen_string_literal: true\n\nmodule Split\n module Helper\n OVERRIDE_PARAM_NAME = \"ab_test\"\n\n module_function\n\n def ab_test(metric_descriptor, control = nil, *alternatives)\n begin\n experiment = ExperimentCatalog.find_or_initialize(metric_descriptor, control, *alternatives)\n alternative = if Split.configuration.enabled && !exclude_visitor?\n experiment.save\n raise(Split::InvalidExperimentsFormatError) unless (Split.configuration.experiments || {}).fetch(experiment.name.to_sym, {})[:combined_experiments].nil?\n trial = Trial.new(user: ab_user, experiment: experiment,\n override: override_alternative(experiment.name), exclude: exclude_visitor?,\n disabled: split_generically_disabled?)\n alt = trial.choose!(self)\n alt ? 
alt.name : nil\n else\n control_variable(experiment.control)\n end\n rescue Errno::ECONNREFUSED, Redis::BaseError, SocketError => e\n raise(e) unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n\n if Split.configuration.db_failover_allow_parameter_override\n alternative = override_alternative(experiment.name) if override_present?(experiment.name)\n alternative = control_variable(experiment.control) if split_generically_disabled?\n end\n ensure\n alternative ||= control_variable(experiment.control)\n end\n\n if block_given?\n metadata = experiment.metadata[alternative] if experiment.metadata\n yield(alternative, metadata || {})\n else\n alternative\n end\n end\n\n def reset!(experiment)\n ab_user.delete(experiment.key)\n end\n\n def override(experiment_name, alternatives)\n return params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name])\n end\n\n def begin_experiment(experiment, alternative_name)\n true\n else\n alternative_name = ab_user[experiment.key]\n trial = Trial.new(\n user: ab_user,\n experiment: experiment,\n alternative: alternative_name,\n goals: options[:goals],\n )\n\n trial.complete!(self)\n\n if should_reset\n reset!(experiment)\n else\n ab_user[experiment.finished_key] = true\n end\n end\n end\n\n def ab_finished(metric_descriptor, options = { reset: true })\n return if exclude_visitor? || Split.configuration.disabled?\n metric_descriptor, goals = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n next if override_present?(experiment.key)\n finish_experiment(experiment, options.merge(goals: goals))\n end\n end\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def ab_record_extra_info(metric_descriptor, key, value = 1)\n return if exclude_visitor? 
|| Split.configuration.disabled?\n metric_descriptor, _ = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n alternative_name = ab_user[experiment.key]\n\n if alternative_name\n alternative = experiment.alternatives.find { |alt| alt.name == alternative_name }\n alternative.record_extra_info(key, value) if alternative\n end\n end\n end\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def ab_active_experiments\n ab_user.active_experiments\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def override_present?(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative_by_params(experiment_name)\n defined?(params) && params[OVERRIDE_PARAM_NAME] && params[OVERRIDE_PARAM_NAME][experiment_name]\n end\n\n def override_alternative_by_cookies(experiment_name)\n return unless defined?(request)\n\n if request.cookies && request.cookies.key?(\"split_override\")\n experiments = JSON.parse(request.cookies[\"split_override\"]) rescue {}\n experiments[experiment_name]\n end\n end\n\n def split_generically_disabled?\n defined?(params) && params[\"SPLIT_DISABLE\"]\n end\n\n def ab_user\n @ab_user ||= User.new(self)\n end\n\n def exclude_visitor?\n defined?(request) && (instance_exec(request, &Split.configuration.ignore_filter) || is_ignored_ip_address? || is_robot? 
|| is_preview?)\n end\n\n def is_robot?\n defined?(request) && request.user_agent =~ Split.configuration.robot_regex\n end\n\n def is_preview?\n defined?(request) && defined?(request.headers) && request.headers[\"x-purpose\"] == \"preview\"\n end\n\n def is_ignored_ip_address?\n return false if Split.configuration.ignore_ip_addresses.empty?\n\n Split.configuration.ignore_ip_addresses.each do |ip|\n return true if defined?(request) && (request.ip == ip || (ip.class == Regexp && request.ip =~ ip))\n end\n false\n end\n\n def active_experiments\n ab_user.active_experiments\n end\n\n def normalize_metric(metric_descriptor)\n if Hash === metric_descriptor\n experiment_name = metric_descriptor.keys.first\n goals = Array(metric_descriptor.values.first)\n else\n experiment_name = metric_descriptor\n goals = []\n end\n return experiment_name, goals\n end\n\n def control_variable(control)\n Hash === control ? control.keys.first.to_s : control.to_s\n end\n end\nend\n\n minor code cleanup\n\n @@ -45,7 +45,7 @@ module Split\n end\n \n def override(experiment_name, alternatives)\n- return params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name])\n+ params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name])\n end\n \n def begin_experiment(experiment, alternative_name)\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"minor code cleanup"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675141,"cells":{"id":{"kind":"string","value":"10070791"},"text":{"kind":"string","value":" helper.rb\n # frozen_string_literal: true\n\nmodule Split\n module Helper\n OVERRIDE_PARAM_NAME = \"ab_test\"\n\n module_function\n\n def ab_test(metric_descriptor, control = nil, 
*alternatives)\n begin\n experiment = ExperimentCatalog.find_or_initialize(metric_descriptor, control, *alternatives)\n alternative = if Split.configuration.enabled && !exclude_visitor?\n experiment.save\n raise(Split::InvalidExperimentsFormatError) unless (Split.configuration.experiments || {}).fetch(experiment.name.to_sym, {})[:combined_experiments].nil?\n trial = Trial.new(user: ab_user, experiment: experiment,\n override: override_alternative(experiment.name), exclude: exclude_visitor?,\n disabled: split_generically_disabled?)\n alt = trial.choose!(self)\n alt ? alt.name : nil\n else\n control_variable(experiment.control)\n end\n rescue Errno::ECONNREFUSED, Redis::BaseError, SocketError => e\n raise(e) unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n\n if Split.configuration.db_failover_allow_parameter_override\n alternative = override_alternative(experiment.name) if override_present?(experiment.name)\n alternative = control_variable(experiment.control) if split_generically_disabled?\n end\n ensure\n alternative ||= control_variable(experiment.control)\n end\n\n if block_given?\n metadata = experiment.metadata[alternative] if experiment.metadata\n yield(alternative, metadata || {})\n else\n alternative\n end\n end\n\n def reset!(experiment)\n ab_user.delete(experiment.key)\n end\n\n def override(experiment_name, alternatives)\n return params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name])\n end\n\n def begin_experiment(experiment, alternative_name)\n true\n else\n alternative_name = ab_user[experiment.key]\n trial = Trial.new(\n user: ab_user,\n experiment: experiment,\n alternative: alternative_name,\n goals: options[:goals],\n )\n\n trial.complete!(self)\n\n if should_reset\n reset!(experiment)\n else\n ab_user[experiment.finished_key] = true\n end\n end\n end\n\n def ab_finished(metric_descriptor, options = { reset: true })\n return if exclude_visitor? 
|| Split.configuration.disabled?\n metric_descriptor, goals = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n next if override_present?(experiment.key)\n finish_experiment(experiment, options.merge(goals: goals))\n end\n end\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def ab_record_extra_info(metric_descriptor, key, value = 1)\n return if exclude_visitor? || Split.configuration.disabled?\n metric_descriptor, _ = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n alternative_name = ab_user[experiment.key]\n\n if alternative_name\n alternative = experiment.alternatives.find { |alt| alt.name == alternative_name }\n alternative.record_extra_info(key, value) if alternative\n end\n end\n end\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def ab_active_experiments\n ab_user.active_experiments\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def override_present?(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative_by_params(experiment_name)\n defined?(params) && params[OVERRIDE_PARAM_NAME] && params[OVERRIDE_PARAM_NAME][experiment_name]\n end\n\n def override_alternative_by_cookies(experiment_name)\n return unless defined?(request)\n\n if request.cookies && request.cookies.key?(\"split_override\")\n experiments = JSON.parse(request.cookies[\"split_override\"]) rescue {}\n 
experiments[experiment_name]\n end\n end\n\n def split_generically_disabled?\n defined?(params) && params[\"SPLIT_DISABLE\"]\n end\n\n def ab_user\n @ab_user ||= User.new(self)\n end\n\n def exclude_visitor?\n defined?(request) && (instance_exec(request, &Split.configuration.ignore_filter) || is_ignored_ip_address? || is_robot? || is_preview?)\n end\n\n def is_robot?\n defined?(request) && request.user_agent =~ Split.configuration.robot_regex\n end\n\n def is_preview?\n defined?(request) && defined?(request.headers) && request.headers[\"x-purpose\"] == \"preview\"\n end\n\n def is_ignored_ip_address?\n return false if Split.configuration.ignore_ip_addresses.empty?\n\n Split.configuration.ignore_ip_addresses.each do |ip|\n return true if defined?(request) && (request.ip == ip || (ip.class == Regexp && request.ip =~ ip))\n end\n false\n end\n\n def active_experiments\n ab_user.active_experiments\n end\n\n def normalize_metric(metric_descriptor)\n if Hash === metric_descriptor\n experiment_name = metric_descriptor.keys.first\n goals = Array(metric_descriptor.values.first)\n else\n experiment_name = metric_descriptor\n goals = []\n end\n return experiment_name, goals\n end\n\n def control_variable(control)\n Hash === control ? 
control.keys.first.to_s : control.to_s\n end\n end\nend\n\n minor code cleanup\n\n @@ -45,7 +45,7 @@ module Split\n end\n \n def override(experiment_name, alternatives)\n- return params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name])\n+ params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name])\n end\n \n def begin_experiment(experiment, alternative_name)\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"minor code cleanup"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675142,"cells":{"id":{"kind":"string","value":"10070792"},"text":{"kind":"string","value":" helper.rb\n # frozen_string_literal: true\n\nmodule Split\n module Helper\n OVERRIDE_PARAM_NAME = \"ab_test\"\n\n module_function\n\n def ab_test(metric_descriptor, control = nil, *alternatives)\n begin\n experiment = ExperimentCatalog.find_or_initialize(metric_descriptor, control, *alternatives)\n alternative = if Split.configuration.enabled && !exclude_visitor?\n experiment.save\n raise(Split::InvalidExperimentsFormatError) unless (Split.configuration.experiments || {}).fetch(experiment.name.to_sym, {})[:combined_experiments].nil?\n trial = Trial.new(user: ab_user, experiment: experiment,\n override: override_alternative(experiment.name), exclude: exclude_visitor?,\n disabled: split_generically_disabled?)\n alt = trial.choose!(self)\n alt ? 
alt.name : nil\n else\n control_variable(experiment.control)\n end\n rescue Errno::ECONNREFUSED, Redis::BaseError, SocketError => e\n raise(e) unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n\n if Split.configuration.db_failover_allow_parameter_override\n alternative = override_alternative(experiment.name) if override_present?(experiment.name)\n alternative = control_variable(experiment.control) if split_generically_disabled?\n end\n ensure\n alternative ||= control_variable(experiment.control)\n end\n\n if block_given?\n metadata = experiment.metadata[alternative] if experiment.metadata\n yield(alternative, metadata || {})\n else\n alternative\n end\n end\n\n def reset!(experiment)\n ab_user.delete(experiment.key)\n end\n\n def override(experiment_name, alternatives)\n return params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name])\n end\n\n def begin_experiment(experiment, alternative_name)\n true\n else\n alternative_name = ab_user[experiment.key]\n trial = Trial.new(\n user: ab_user,\n experiment: experiment,\n alternative: alternative_name,\n goals: options[:goals],\n )\n\n trial.complete!(self)\n\n if should_reset\n reset!(experiment)\n else\n ab_user[experiment.finished_key] = true\n end\n end\n end\n\n def ab_finished(metric_descriptor, options = { reset: true })\n return if exclude_visitor? || Split.configuration.disabled?\n metric_descriptor, goals = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n next if override_present?(experiment.key)\n finish_experiment(experiment, options.merge(goals: goals))\n end\n end\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def ab_record_extra_info(metric_descriptor, key, value = 1)\n return if exclude_visitor? 
|| Split.configuration.disabled?\n metric_descriptor, _ = normalize_metric(metric_descriptor)\n experiments = Metric.possible_experiments(metric_descriptor)\n\n if experiments.any?\n experiments.each do |experiment|\n alternative_name = ab_user[experiment.key]\n\n if alternative_name\n alternative = experiment.alternatives.find { |alt| alt.name == alternative_name }\n alternative.record_extra_info(key, value) if alternative\n end\n end\n end\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def ab_active_experiments\n ab_user.active_experiments\n rescue => e\n raise unless Split.configuration.db_failover\n Split.configuration.db_failover_on_db_error.call(e)\n end\n\n def override_present?(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative(experiment_name)\n override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name)\n end\n\n def override_alternative_by_params(experiment_name)\n defined?(params) && params[OVERRIDE_PARAM_NAME] && params[OVERRIDE_PARAM_NAME][experiment_name]\n end\n\n def override_alternative_by_cookies(experiment_name)\n return unless defined?(request)\n\n if request.cookies && request.cookies.key?(\"split_override\")\n experiments = JSON.parse(request.cookies[\"split_override\"]) rescue {}\n experiments[experiment_name]\n end\n end\n\n def split_generically_disabled?\n defined?(params) && params[\"SPLIT_DISABLE\"]\n end\n\n def ab_user\n @ab_user ||= User.new(self)\n end\n\n def exclude_visitor?\n defined?(request) && (instance_exec(request, &Split.configuration.ignore_filter) || is_ignored_ip_address? || is_robot? 
|| is_preview?)\n end\n\n def is_robot?\n defined?(request) && request.user_agent =~ Split.configuration.robot_regex\n end\n\n def is_preview?\n defined?(request) && defined?(request.headers) && request.headers[\"x-purpose\"] == \"preview\"\n end\n\n def is_ignored_ip_address?\n return false if Split.configuration.ignore_ip_addresses.empty?\n\n Split.configuration.ignore_ip_addresses.each do |ip|\n return true if defined?(request) && (request.ip == ip || (ip.class == Regexp && request.ip =~ ip))\n end\n false\n end\n\n def active_experiments\n ab_user.active_experiments\n end\n\n def normalize_metric(metric_descriptor)\n if Hash === metric_descriptor\n experiment_name = metric_descriptor.keys.first\n goals = Array(metric_descriptor.values.first)\n else\n experiment_name = metric_descriptor\n goals = []\n end\n return experiment_name, goals\n end\n\n def control_variable(control)\n Hash === control ? control.keys.first.to_s : control.to_s\n end\n end\nend\n\n minor code cleanup\n\n @@ -45,7 +45,7 @@ module Split\n end\n \n def override(experiment_name, alternatives)\n- return params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name])\n+ params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name])\n end\n \n def begin_experiment(experiment, alternative_name)\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"minor code cleanup"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675143,"cells":{"id":{"kind":"string","value":"10070793"},"text":{"kind":"string","value":" experiment_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"time\"\n\ndescribe Split::Experiment do\n def new_experiment(goals = [])\n 
Split::Experiment.new(\"link_color\", alternatives: [\"blue\", \"red\", \"green\"], goals: goals)\n end\n\n def alternative(color)\n Split::Alternative.new(color, \"link_color\")\n end\n\n let(:experiment) { new_experiment }\n\n let(:blue) { alternative(\"blue\") }\n let(:green) { alternative(\"green\") }\n\n context \"with an experiment\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"]) }\n\n it \"should have a name\" do\n expect(experiment.name).to eq(\"basket_text\")\n end\n\n it \"should have alternatives\" do\n expect(experiment.alternatives.length).to be 2\n end\n\n it \"should have alternatives with correct names\" do\n expect(experiment.alternatives.collect { |a| a.name }).to eq([\"Basket\", \"Cart\"])\n end\n\n it \"should be resettable by default\" do\n expect(experiment.resettable).to be_truthy\n end\n\n it \"should save to redis\" do\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n end\n\n it \"should save the start time to redis\" do\n experiment_start_time = Time.at(1372167761)\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to eq(experiment_start_time)\n end\n\n it \"should not save the start time to redis when start_manually is enabled\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should save the selected algorithm to redis\" do\n experiment_algorithm = Split::Algorithms::Whiplash\n experiment.algorithm = experiment_algorithm\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").algorithm).to eq(experiment_algorithm)\n end\n\n it \"should handle having a start time stored as a string\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to 
receive(:now).twice.and_return(experiment_start_time)\n experiment.save\n Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to eq(experiment_start_time)\n end\n\n it \"should handle not having a start time\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n Split.redis.hdel(:experiment_start_times, experiment.name)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should not create duplicates when saving multiple times\" do\n experiment.save\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n expect(Split.redis.lrange(\"basket_text\", 0, -1)).to eq(['{\"Basket\":1}', '{\"Cart\":1}'])\n end\n\n describe \"new record?\" do\n it \"should know if it hasn't been saved yet\" do\n expect(experiment.new_record?).to be_truthy\n end\n\n it \"should know if it has been saved yet\" do\n experiment.save\n expect(experiment.new_record?).to be_falsey\n end\n end\n\n describe \"control\" do\n it \"should be the first alternative\" do\n experiment.save\n expect(experiment.control.name).to eq(\"Basket\")\n end\n end\n end\n\n describe \"initialization\" do\n it \"should set the algorithm when passed as an option to the initializer\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should be possible to make an experiment not resettable\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n expect(experiment.resettable).to be_falsey\n end\n\n context \"from configuration\" do\n let(:experiment_name) { :my_experiment }\n let(:experiments) do\n {\n experiment_name => {\n alternatives: 
[\"Control Opt\", \"Alt one\"]\n }\n }\n end\n\n before { Split.configuration.experiments = experiments }\n\n it \"assigns default values to the experiment\" do\n expect(Split::Experiment.new(experiment_name).resettable).to eq(true)\n end\n end\n end\n\n describe \"persistent configuration\" do\n it \"should persist resettable in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.resettable).to be_falsey\n end\n\n describe \"#metadata\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash, metadata: meta) }\n let(:meta) { { a: \"b\" } }\n\n before do\n experiment.save\n end\n\n it \"should delete the key when metadata is removed\" do\n experiment.metadata = nil\n experiment.save\n\n expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey\n end\n\n context \"simple hash\" do\n let(:meta) { { \"basket\" => \"a\", \"cart\" => \"b\" } }\n\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n\n context \"nested hash\" do\n let(:meta) { { \"basket\" => { \"one\" => \"two\" }, \"cart\" => \"b\" } }\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n end\n\n it \"should persist algorithm in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should persist a new experiment in redis, that does not 
exist in the configuration file\" do\n experiment = Split::Experiment.new(\"foobar\", alternatives: [\"tra\", \"la\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"foobar\")\n expect(e).to eq(experiment)\n expect(e.alternatives.collect { |a| a.name }).to eq([\"tra\", \"la\"])\n end\n end\n\n describe \"deleting\" do\n it \"should delete itself\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [ \"Basket\", \"Cart\"])\n experiment.save\n\n experiment.delete\n expect(Split.redis.exists?(\"link_color\")).to be false\n expect(Split::ExperimentCatalog.find(\"link_color\")).to be_nil\n end\n\n it \"should increment the version\" do\n expect(experiment.version).to eq(0)\n experiment.delete\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_delete hook\" do\n expect(Split.configuration.on_experiment_delete).to receive(:call)\n experiment.delete\n end\n\n it \"should call the on_before_experiment_delete hook\" do\n expect(Split.configuration.on_before_experiment_delete).to receive(:call)\n experiment.delete\n end\n\n it \"should reset the start time if the experiment should be manually started\" do\n Split.configuration.start_manually = true\n experiment.start\n experiment.delete\n expect(experiment.start_time).to be_nil\n end\n\n it \"should default cohorting back to false\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq(true)\n experiment.delete\n expect(experiment.cohorting_disabled?).to eq(false)\n end\n end\n\n describe \"winner\" do\n it \"should have no winner initially\" do\n expect(experiment.winner).to be_nil\n end\n end\n\n describe \"winner=\" do\n it \"should allow you to specify a winner\" do\n experiment.save\n experiment.winner = \"red\"\n expect(experiment.winner.name).to eq(\"red\")\n end\n\n it \"should call the on_experiment_winner_choose hook\" do\n expect(Split.configuration.on_experiment_winner_choose).to 
receive(:call)\n experiment.winner = \"green\"\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to_not have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.winner = \"red\"\n expect(experiment).to have_winner\n end\n end\n end\n\n describe \"reset_winner\" do\n before { experiment.winner = \"green\" }\n\n it \"should reset the winner\" do\n experiment.reset_winner\n expect(experiment.winner).to be_nil\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.reset_winner\n expect(experiment).to_not have_winner\n end\n end\n end\n\n describe \"has_winner?\" do\n context \"with winner\" do\n before { experiment.winner = \"red\" }\n\n it \"returns true\" do\n expect(experiment).to have_winner\n end\n end\n\n context \"without winner\" do\n it \"returns false\" do\n expect(experiment).to_not have_winner\n end\n end\n\n it \"memoizes has_winner state\" do\n expect(experiment).to receive(:winner).once\n expect(experiment).to_not have_winner\n expect(experiment).to_not have_winner\n end\n end\n\n describe \"reset\" do\n let(:reset_manually) { false }\n\n before do\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n experiment.save\n green.increment_participation\n green.increment_participation\n end\n\n it \"should reset all alternatives\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n it \"should reset the winner\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(experiment.winner).to be_nil\n end\n\n it \"should increment the version\" do\n 
expect(experiment.version).to eq(0)\n experiment.reset\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_reset hook\" do\n expect(Split.configuration.on_experiment_reset).to receive(:call)\n experiment.reset\n end\n\n it \"should call the on_before_experiment_reset hook\" do\n expect(Split.configuration.on_before_experiment_reset).to receive(:call)\n experiment.reset\n end\n end\n\n describe \"algorithm\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n it \"should use the default algorithm if none is specified\" do\n expect(experiment.algorithm).to eq(Split.configuration.algorithm)\n end\n\n it \"should use the user specified algorithm for this experiment if specified\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n end\n\n describe \"#next_alternative\" do\n context \"with multiple alternatives\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n context \"with winner\" do\n it \"should always return the winner\" do\n green = Split::Alternative.new(\"green\", \"link_color\")\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n\n context \"without winner\" do\n it \"should use the specified algorithm\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new(\"green\", \"link_color\"))\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n end\n\n context \"with single alternative\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\") }\n\n it \"should always return the only alternative\" do\n 
expect(experiment.next_alternative.name).to eq(\"blue\")\n expect(experiment.next_alternative.name).to eq(\"blue\")\n end\n end\n end\n\n describe \"#cohorting_disabled?\" do\n it \"returns false when nothing has been configured\" do\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns true when enable_cohorting is performed\" do\n experiment.enable_cohorting\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns false when nothing has been configured\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq true\n end\n end\n\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n end\n\nend\n it \"should only reset once\" do\n experiment.save\n expect(experiment.version).to eq(0)\n same_experiment = same_but_different_alternative\n expect(same_experiment.version).to eq(1)\n same_experiment_again = same_but_different_alternative\n expect(same_experiment_again.version).to eq(1)\n end\n\n context \"when metadata is changed\" do\n it \"should increase version\" do\n experiment.save\n experiment.metadata = { \"foo\" => \"bar\" }\n\n expect { experiment.save }.to change { experiment.version }.by(1)\n end\n\n it \"does not increase version\" do\n experiment.metadata = nil\n experiment.save\n expect { experiment.save }.to change { experiment.version }.by(0)\n end\n end\n\n context \"when experiment configuration is changed\" do\n let(:reset_manually) { false }\n\n before do\n experiment.save\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n green.increment_participation\n green.increment_participation\n experiment.set_alternatives_and_options(alternatives: %w(blue red green zip),\n goals: %w(purchase))\n experiment.save\n end\n\n it \"resets all alternatives\" do\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n context \"when reset_manually is set\" do\n let(:reset_manually) { true }\n\n 
it \"does not reset alternatives\" do\n expect(green.participant_count).to eq(2)\n expect(green.completed_count).to eq(0)\n end\n end\n end\n end\n\n describe \"alternatives passed as non-strings\" do\n it \"should throw an exception if an alternative is passed that is not a string\" do\n expect { Split::ExperimentCatalog.find_or_create(\"link_color\", :blue, :red) }.to raise_error(ArgumentError)\n expect { Split::ExperimentCatalog.find_or_create(\"link_enabled\", true, false) }.to raise_error(ArgumentError)\n end\n end\n\n describe \"specifying weights\" do\n let(:experiment_with_weight) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", { \"blue\" => 1 }, { \"red\" => 2 })\n }\n\n it \"should work for a new experiment\" do\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n\n it \"should work for an existing experiment\" do\n experiment.save\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n end\n\n describe \"specifying goals\" do\n let(:experiment) {\n new_experiment([\"purchase\"])\n }\n\n context \"saving experiment\" do\n let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\") }\n\n before { experiment.save }\n\n it \"can find existing experiment\" do\n expect(Split::ExperimentCatalog.find(\"link_color\").name).to eq(\"link_color\")\n end\n\n it \"should reset an experiment if it is loaded with different goals\" do\n same_but_different_goals\n expect(Split::ExperimentCatalog.find(\"link_color\").goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n it \"should have goals\" do\n expect(experiment.goals).to eq([\"purchase\"])\n end\n\n context \"find or create experiment\" do\n it \"should have correct goals\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([\"purchase\", 
\"refund\"])\n experiment = Split::ExperimentCatalog.find_or_create(\"link_color3\", \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([])\n end\n end\n end\n\n describe \"beta probability calculation\" do\n it \"should return a hash with the probability of each alternative being the best\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"mathematicians\", \"bernoulli\", \"poisson\", \"lagrange\")\n experiment.calc_winning_alternatives\n expect(experiment.alternative_probabilities).not_to be_nil\n end\n\n it \"should return between 46% and 54% probability for an experiment with 2 alternatives and no data\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"scientists\", \"einstein\", \"bohr\")\n experiment.calc_winning_alternatives\n expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50)\n end\n\n it \"should calculate the probability of being the winning alternative separately for each goal\", skip: true do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n goal1 = experiment.goals[0]\n goal2 = experiment.goals[1]\n experiment.alternatives.each do |alternative|\n alternative.participant_count = 50\n alternative.set_completed_count(10, goal1)\n alternative.set_completed_count(15+rand(30), goal2)\n end\n experiment.calc_winning_alternatives\n alt = experiment.alternatives[0]\n p_goal1 = alt.p_winner(goal1)\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n\n it \"should return nil and not re-calculate probabilities if they have already been calculated today\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.calc_winning_alternatives).not_to be nil\n expect(experiment.calc_winning_alternatives).to be nil\n end\n end\nend\n\n Fix caching of winning alternative to prevent recalculation 
each time the dashboard is loaded\n\nIt looks like when this feature was added, the calculation of winning\nalternatives was meant to take place only once per day.\n\nThe #calc_winning_alternatives method was never called, which was meant\nto be saving the experiment's last calc_time. Update the experiment view\nto call this method instead of the #estimate_winning_alternative method\ndirectly. Fix caching so that the #calc_time= method is called, rather\nthan assigning to a local variable. Update calc_time so that number of\ndays since epoch is stored, rather than the day of month (1-31). Ensure\nwe're comparing integer values, rather than the string value Redis\nreturns from #hget.\n\n @@ -428,6 +428,13 @@ describe Split::Experiment do\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n+\n+ it \"should return nil and not re-calculate probabilities if they have already been calculated today\" do\n+ experiment = Split::ExperimentCatalog.find_or_create({'link_color3' => [\"purchase\", \"refund\"]}, 'blue', 'red', 'green')\n+ experiment_calc_time = Time.now.utc.to_i / 86400\n+ experiment.calc_time = experiment_calc_time\n+ expect(experiment.calc_winning_alternatives).to be nil\n+ end\n end\n \n end\n"},"addition_count":{"kind":"number","value":7,"string":"7"},"commit_subject":{"kind":"string","value":"Fix caching of winning alternative to prevent recalculation each time the dashboard is loaded"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675144,"cells":{"id":{"kind":"string","value":"10070794"},"text":{"kind":"string","value":" experiment_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"time\"\n\ndescribe Split::Experiment do\n def new_experiment(goals = [])\n 
Split::Experiment.new(\"link_color\", alternatives: [\"blue\", \"red\", \"green\"], goals: goals)\n end\n\n def alternative(color)\n Split::Alternative.new(color, \"link_color\")\n end\n\n let(:experiment) { new_experiment }\n\n let(:blue) { alternative(\"blue\") }\n let(:green) { alternative(\"green\") }\n\n context \"with an experiment\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"]) }\n\n it \"should have a name\" do\n expect(experiment.name).to eq(\"basket_text\")\n end\n\n it \"should have alternatives\" do\n expect(experiment.alternatives.length).to be 2\n end\n\n it \"should have alternatives with correct names\" do\n expect(experiment.alternatives.collect { |a| a.name }).to eq([\"Basket\", \"Cart\"])\n end\n\n it \"should be resettable by default\" do\n expect(experiment.resettable).to be_truthy\n end\n\n it \"should save to redis\" do\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n end\n\n it \"should save the start time to redis\" do\n experiment_start_time = Time.at(1372167761)\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to eq(experiment_start_time)\n end\n\n it \"should not save the start time to redis when start_manually is enabled\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should save the selected algorithm to redis\" do\n experiment_algorithm = Split::Algorithms::Whiplash\n experiment.algorithm = experiment_algorithm\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").algorithm).to eq(experiment_algorithm)\n end\n\n it \"should handle having a start time stored as a string\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to 
receive(:now).twice.and_return(experiment_start_time)\n experiment.save\n Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to eq(experiment_start_time)\n end\n\n it \"should handle not having a start time\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n Split.redis.hdel(:experiment_start_times, experiment.name)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should not create duplicates when saving multiple times\" do\n experiment.save\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n expect(Split.redis.lrange(\"basket_text\", 0, -1)).to eq(['{\"Basket\":1}', '{\"Cart\":1}'])\n end\n\n describe \"new record?\" do\n it \"should know if it hasn't been saved yet\" do\n expect(experiment.new_record?).to be_truthy\n end\n\n it \"should know if it has been saved yet\" do\n experiment.save\n expect(experiment.new_record?).to be_falsey\n end\n end\n\n describe \"control\" do\n it \"should be the first alternative\" do\n experiment.save\n expect(experiment.control.name).to eq(\"Basket\")\n end\n end\n end\n\n describe \"initialization\" do\n it \"should set the algorithm when passed as an option to the initializer\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should be possible to make an experiment not resettable\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n expect(experiment.resettable).to be_falsey\n end\n\n context \"from configuration\" do\n let(:experiment_name) { :my_experiment }\n let(:experiments) do\n {\n experiment_name => {\n alternatives: 
[\"Control Opt\", \"Alt one\"]\n }\n }\n end\n\n before { Split.configuration.experiments = experiments }\n\n it \"assigns default values to the experiment\" do\n expect(Split::Experiment.new(experiment_name).resettable).to eq(true)\n end\n end\n end\n\n describe \"persistent configuration\" do\n it \"should persist resettable in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.resettable).to be_falsey\n end\n\n describe \"#metadata\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash, metadata: meta) }\n let(:meta) { { a: \"b\" } }\n\n before do\n experiment.save\n end\n\n it \"should delete the key when metadata is removed\" do\n experiment.metadata = nil\n experiment.save\n\n expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey\n end\n\n context \"simple hash\" do\n let(:meta) { { \"basket\" => \"a\", \"cart\" => \"b\" } }\n\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n\n context \"nested hash\" do\n let(:meta) { { \"basket\" => { \"one\" => \"two\" }, \"cart\" => \"b\" } }\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n end\n\n it \"should persist algorithm in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should persist a new experiment in redis, that does not 
exist in the configuration file\" do\n experiment = Split::Experiment.new(\"foobar\", alternatives: [\"tra\", \"la\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"foobar\")\n expect(e).to eq(experiment)\n expect(e.alternatives.collect { |a| a.name }).to eq([\"tra\", \"la\"])\n end\n end\n\n describe \"deleting\" do\n it \"should delete itself\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [ \"Basket\", \"Cart\"])\n experiment.save\n\n experiment.delete\n expect(Split.redis.exists?(\"link_color\")).to be false\n expect(Split::ExperimentCatalog.find(\"link_color\")).to be_nil\n end\n\n it \"should increment the version\" do\n expect(experiment.version).to eq(0)\n experiment.delete\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_delete hook\" do\n expect(Split.configuration.on_experiment_delete).to receive(:call)\n experiment.delete\n end\n\n it \"should call the on_before_experiment_delete hook\" do\n expect(Split.configuration.on_before_experiment_delete).to receive(:call)\n experiment.delete\n end\n\n it \"should reset the start time if the experiment should be manually started\" do\n Split.configuration.start_manually = true\n experiment.start\n experiment.delete\n expect(experiment.start_time).to be_nil\n end\n\n it \"should default cohorting back to false\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq(true)\n experiment.delete\n expect(experiment.cohorting_disabled?).to eq(false)\n end\n end\n\n describe \"winner\" do\n it \"should have no winner initially\" do\n expect(experiment.winner).to be_nil\n end\n end\n\n describe \"winner=\" do\n it \"should allow you to specify a winner\" do\n experiment.save\n experiment.winner = \"red\"\n expect(experiment.winner.name).to eq(\"red\")\n end\n\n it \"should call the on_experiment_winner_choose hook\" do\n expect(Split.configuration.on_experiment_winner_choose).to 
receive(:call)\n experiment.winner = \"green\"\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to_not have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.winner = \"red\"\n expect(experiment).to have_winner\n end\n end\n end\n\n describe \"reset_winner\" do\n before { experiment.winner = \"green\" }\n\n it \"should reset the winner\" do\n experiment.reset_winner\n expect(experiment.winner).to be_nil\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.reset_winner\n expect(experiment).to_not have_winner\n end\n end\n end\n\n describe \"has_winner?\" do\n context \"with winner\" do\n before { experiment.winner = \"red\" }\n\n it \"returns true\" do\n expect(experiment).to have_winner\n end\n end\n\n context \"without winner\" do\n it \"returns false\" do\n expect(experiment).to_not have_winner\n end\n end\n\n it \"memoizes has_winner state\" do\n expect(experiment).to receive(:winner).once\n expect(experiment).to_not have_winner\n expect(experiment).to_not have_winner\n end\n end\n\n describe \"reset\" do\n let(:reset_manually) { false }\n\n before do\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n experiment.save\n green.increment_participation\n green.increment_participation\n end\n\n it \"should reset all alternatives\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n it \"should reset the winner\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(experiment.winner).to be_nil\n end\n\n it \"should increment the version\" do\n 
expect(experiment.version).to eq(0)\n experiment.reset\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_reset hook\" do\n expect(Split.configuration.on_experiment_reset).to receive(:call)\n experiment.reset\n end\n\n it \"should call the on_before_experiment_reset hook\" do\n expect(Split.configuration.on_before_experiment_reset).to receive(:call)\n experiment.reset\n end\n end\n\n describe \"algorithm\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n it \"should use the default algorithm if none is specified\" do\n expect(experiment.algorithm).to eq(Split.configuration.algorithm)\n end\n\n it \"should use the user specified algorithm for this experiment if specified\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n end\n\n describe \"#next_alternative\" do\n context \"with multiple alternatives\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n context \"with winner\" do\n it \"should always return the winner\" do\n green = Split::Alternative.new(\"green\", \"link_color\")\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n\n context \"without winner\" do\n it \"should use the specified algorithm\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new(\"green\", \"link_color\"))\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n end\n\n context \"with single alternative\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\") }\n\n it \"should always return the only alternative\" do\n 
expect(experiment.next_alternative.name).to eq(\"blue\")\n expect(experiment.next_alternative.name).to eq(\"blue\")\n end\n end\n end\n\n describe \"#cohorting_disabled?\" do\n it \"returns false when nothing has been configured\" do\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns true when enable_cohorting is performed\" do\n experiment.enable_cohorting\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns false when nothing has been configured\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq true\n end\n end\n\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n end\n\nend\n it \"should only reset once\" do\n experiment.save\n expect(experiment.version).to eq(0)\n same_experiment = same_but_different_alternative\n expect(same_experiment.version).to eq(1)\n same_experiment_again = same_but_different_alternative\n expect(same_experiment_again.version).to eq(1)\n end\n\n context \"when metadata is changed\" do\n it \"should increase version\" do\n experiment.save\n experiment.metadata = { \"foo\" => \"bar\" }\n\n expect { experiment.save }.to change { experiment.version }.by(1)\n end\n\n it \"does not increase version\" do\n experiment.metadata = nil\n experiment.save\n expect { experiment.save }.to change { experiment.version }.by(0)\n end\n end\n\n context \"when experiment configuration is changed\" do\n let(:reset_manually) { false }\n\n before do\n experiment.save\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n green.increment_participation\n green.increment_participation\n experiment.set_alternatives_and_options(alternatives: %w(blue red green zip),\n goals: %w(purchase))\n experiment.save\n end\n\n it \"resets all alternatives\" do\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n context \"when reset_manually is set\" do\n let(:reset_manually) { true }\n\n 
it \"does not reset alternatives\" do\n expect(green.participant_count).to eq(2)\n expect(green.completed_count).to eq(0)\n end\n end\n end\n end\n\n describe \"alternatives passed as non-strings\" do\n it \"should throw an exception if an alternative is passed that is not a string\" do\n expect { Split::ExperimentCatalog.find_or_create(\"link_color\", :blue, :red) }.to raise_error(ArgumentError)\n expect { Split::ExperimentCatalog.find_or_create(\"link_enabled\", true, false) }.to raise_error(ArgumentError)\n end\n end\n\n describe \"specifying weights\" do\n let(:experiment_with_weight) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", { \"blue\" => 1 }, { \"red\" => 2 })\n }\n\n it \"should work for a new experiment\" do\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n\n it \"should work for an existing experiment\" do\n experiment.save\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n end\n\n describe \"specifying goals\" do\n let(:experiment) {\n new_experiment([\"purchase\"])\n }\n\n context \"saving experiment\" do\n let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\") }\n\n before { experiment.save }\n\n it \"can find existing experiment\" do\n expect(Split::ExperimentCatalog.find(\"link_color\").name).to eq(\"link_color\")\n end\n\n it \"should reset an experiment if it is loaded with different goals\" do\n same_but_different_goals\n expect(Split::ExperimentCatalog.find(\"link_color\").goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n it \"should have goals\" do\n expect(experiment.goals).to eq([\"purchase\"])\n end\n\n context \"find or create experiment\" do\n it \"should have correct goals\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([\"purchase\", 
\"refund\"])\n experiment = Split::ExperimentCatalog.find_or_create(\"link_color3\", \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([])\n end\n end\n end\n\n describe \"beta probability calculation\" do\n it \"should return a hash with the probability of each alternative being the best\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"mathematicians\", \"bernoulli\", \"poisson\", \"lagrange\")\n experiment.calc_winning_alternatives\n expect(experiment.alternative_probabilities).not_to be_nil\n end\n\n it \"should return between 46% and 54% probability for an experiment with 2 alternatives and no data\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"scientists\", \"einstein\", \"bohr\")\n experiment.calc_winning_alternatives\n expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50)\n end\n\n it \"should calculate the probability of being the winning alternative separately for each goal\", skip: true do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n goal1 = experiment.goals[0]\n goal2 = experiment.goals[1]\n experiment.alternatives.each do |alternative|\n alternative.participant_count = 50\n alternative.set_completed_count(10, goal1)\n alternative.set_completed_count(15+rand(30), goal2)\n end\n experiment.calc_winning_alternatives\n alt = experiment.alternatives[0]\n p_goal1 = alt.p_winner(goal1)\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n\n it \"should return nil and not re-calculate probabilities if they have already been calculated today\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.calc_winning_alternatives).not_to be nil\n expect(experiment.calc_winning_alternatives).to be nil\n end\n end\nend\n\n Fix caching of winning alternative to prevent recalculation 
each time the dashboard is loaded\n\nIt looks like when this feature was added, the calculation of winning\nalternatives was meant to take place only once per day.\n\nThe #calc_winning_alternatives method was never called, which was meant\nto be saving the experiment's last calc_time. Update the experiment view\nto call this method instead of the #estimate_winning_alternative method\ndirectly. Fix caching so that the #calc_time= method is called, rather\nthan assigning to a local variable. Update calc_time so that number of\ndays since epoch is stored, rather than the day of month (1-31). Ensure\nwe're comparing integer values, rather than the string value Redis\nreturns from #hget.\n\n @@ -428,6 +428,13 @@ describe Split::Experiment do\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n+\n+ it \"should return nil and not re-calculate probabilities if they have already been calculated today\" do\n+ experiment = Split::ExperimentCatalog.find_or_create({'link_color3' => [\"purchase\", \"refund\"]}, 'blue', 'red', 'green')\n+ experiment_calc_time = Time.now.utc.to_i / 86400\n+ experiment.calc_time = experiment_calc_time\n+ expect(experiment.calc_winning_alternatives).to be nil\n+ end\n end\n \n end\n"},"addition_count":{"kind":"number","value":7,"string":"7"},"commit_subject":{"kind":"string","value":"Fix caching of winning alternative to prevent recalculation each time the dashboard is loaded"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675145,"cells":{"id":{"kind":"string","value":"10070795"},"text":{"kind":"string","value":" experiment_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"time\"\n\ndescribe Split::Experiment do\n def new_experiment(goals = [])\n 
Split::Experiment.new(\"link_color\", alternatives: [\"blue\", \"red\", \"green\"], goals: goals)\n end\n\n def alternative(color)\n Split::Alternative.new(color, \"link_color\")\n end\n\n let(:experiment) { new_experiment }\n\n let(:blue) { alternative(\"blue\") }\n let(:green) { alternative(\"green\") }\n\n context \"with an experiment\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"]) }\n\n it \"should have a name\" do\n expect(experiment.name).to eq(\"basket_text\")\n end\n\n it \"should have alternatives\" do\n expect(experiment.alternatives.length).to be 2\n end\n\n it \"should have alternatives with correct names\" do\n expect(experiment.alternatives.collect { |a| a.name }).to eq([\"Basket\", \"Cart\"])\n end\n\n it \"should be resettable by default\" do\n expect(experiment.resettable).to be_truthy\n end\n\n it \"should save to redis\" do\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n end\n\n it \"should save the start time to redis\" do\n experiment_start_time = Time.at(1372167761)\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to eq(experiment_start_time)\n end\n\n it \"should not save the start time to redis when start_manually is enabled\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should save the selected algorithm to redis\" do\n experiment_algorithm = Split::Algorithms::Whiplash\n experiment.algorithm = experiment_algorithm\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").algorithm).to eq(experiment_algorithm)\n end\n\n it \"should handle having a start time stored as a string\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to 
receive(:now).twice.and_return(experiment_start_time)\n experiment.save\n Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to eq(experiment_start_time)\n end\n\n it \"should handle not having a start time\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n Split.redis.hdel(:experiment_start_times, experiment.name)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should not create duplicates when saving multiple times\" do\n experiment.save\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n expect(Split.redis.lrange(\"basket_text\", 0, -1)).to eq(['{\"Basket\":1}', '{\"Cart\":1}'])\n end\n\n describe \"new record?\" do\n it \"should know if it hasn't been saved yet\" do\n expect(experiment.new_record?).to be_truthy\n end\n\n it \"should know if it has been saved yet\" do\n experiment.save\n expect(experiment.new_record?).to be_falsey\n end\n end\n\n describe \"control\" do\n it \"should be the first alternative\" do\n experiment.save\n expect(experiment.control.name).to eq(\"Basket\")\n end\n end\n end\n\n describe \"initialization\" do\n it \"should set the algorithm when passed as an option to the initializer\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should be possible to make an experiment not resettable\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n expect(experiment.resettable).to be_falsey\n end\n\n context \"from configuration\" do\n let(:experiment_name) { :my_experiment }\n let(:experiments) do\n {\n experiment_name => {\n alternatives: 
[\"Control Opt\", \"Alt one\"]\n }\n }\n end\n\n before { Split.configuration.experiments = experiments }\n\n it \"assigns default values to the experiment\" do\n expect(Split::Experiment.new(experiment_name).resettable).to eq(true)\n end\n end\n end\n\n describe \"persistent configuration\" do\n it \"should persist resettable in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.resettable).to be_falsey\n end\n\n describe \"#metadata\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash, metadata: meta) }\n let(:meta) { { a: \"b\" } }\n\n before do\n experiment.save\n end\n\n it \"should delete the key when metadata is removed\" do\n experiment.metadata = nil\n experiment.save\n\n expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey\n end\n\n context \"simple hash\" do\n let(:meta) { { \"basket\" => \"a\", \"cart\" => \"b\" } }\n\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n\n context \"nested hash\" do\n let(:meta) { { \"basket\" => { \"one\" => \"two\" }, \"cart\" => \"b\" } }\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n end\n\n it \"should persist algorithm in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should persist a new experiment in redis, that does not 
exist in the configuration file\" do\n experiment = Split::Experiment.new(\"foobar\", alternatives: [\"tra\", \"la\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"foobar\")\n expect(e).to eq(experiment)\n expect(e.alternatives.collect { |a| a.name }).to eq([\"tra\", \"la\"])\n end\n end\n\n describe \"deleting\" do\n it \"should delete itself\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [ \"Basket\", \"Cart\"])\n experiment.save\n\n experiment.delete\n expect(Split.redis.exists?(\"link_color\")).to be false\n expect(Split::ExperimentCatalog.find(\"link_color\")).to be_nil\n end\n\n it \"should increment the version\" do\n expect(experiment.version).to eq(0)\n experiment.delete\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_delete hook\" do\n expect(Split.configuration.on_experiment_delete).to receive(:call)\n experiment.delete\n end\n\n it \"should call the on_before_experiment_delete hook\" do\n expect(Split.configuration.on_before_experiment_delete).to receive(:call)\n experiment.delete\n end\n\n it \"should reset the start time if the experiment should be manually started\" do\n Split.configuration.start_manually = true\n experiment.start\n experiment.delete\n expect(experiment.start_time).to be_nil\n end\n\n it \"should default cohorting back to false\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq(true)\n experiment.delete\n expect(experiment.cohorting_disabled?).to eq(false)\n end\n end\n\n describe \"winner\" do\n it \"should have no winner initially\" do\n expect(experiment.winner).to be_nil\n end\n end\n\n describe \"winner=\" do\n it \"should allow you to specify a winner\" do\n experiment.save\n experiment.winner = \"red\"\n expect(experiment.winner.name).to eq(\"red\")\n end\n\n it \"should call the on_experiment_winner_choose hook\" do\n expect(Split.configuration.on_experiment_winner_choose).to 
receive(:call)\n experiment.winner = \"green\"\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to_not have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.winner = \"red\"\n expect(experiment).to have_winner\n end\n end\n end\n\n describe \"reset_winner\" do\n before { experiment.winner = \"green\" }\n\n it \"should reset the winner\" do\n experiment.reset_winner\n expect(experiment.winner).to be_nil\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.reset_winner\n expect(experiment).to_not have_winner\n end\n end\n end\n\n describe \"has_winner?\" do\n context \"with winner\" do\n before { experiment.winner = \"red\" }\n\n it \"returns true\" do\n expect(experiment).to have_winner\n end\n end\n\n context \"without winner\" do\n it \"returns false\" do\n expect(experiment).to_not have_winner\n end\n end\n\n it \"memoizes has_winner state\" do\n expect(experiment).to receive(:winner).once\n expect(experiment).to_not have_winner\n expect(experiment).to_not have_winner\n end\n end\n\n describe \"reset\" do\n let(:reset_manually) { false }\n\n before do\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n experiment.save\n green.increment_participation\n green.increment_participation\n end\n\n it \"should reset all alternatives\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n it \"should reset the winner\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(experiment.winner).to be_nil\n end\n\n it \"should increment the version\" do\n 
expect(experiment.version).to eq(0)\n experiment.reset\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_reset hook\" do\n expect(Split.configuration.on_experiment_reset).to receive(:call)\n experiment.reset\n end\n\n it \"should call the on_before_experiment_reset hook\" do\n expect(Split.configuration.on_before_experiment_reset).to receive(:call)\n experiment.reset\n end\n end\n\n describe \"algorithm\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n it \"should use the default algorithm if none is specified\" do\n expect(experiment.algorithm).to eq(Split.configuration.algorithm)\n end\n\n it \"should use the user specified algorithm for this experiment if specified\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n end\n\n describe \"#next_alternative\" do\n context \"with multiple alternatives\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n context \"with winner\" do\n it \"should always return the winner\" do\n green = Split::Alternative.new(\"green\", \"link_color\")\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n\n context \"without winner\" do\n it \"should use the specified algorithm\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new(\"green\", \"link_color\"))\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n end\n\n context \"with single alternative\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\") }\n\n it \"should always return the only alternative\" do\n 
expect(experiment.next_alternative.name).to eq(\"blue\")\n expect(experiment.next_alternative.name).to eq(\"blue\")\n end\n end\n end\n\n describe \"#cohorting_disabled?\" do\n it \"returns false when nothing has been configured\" do\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns true when enable_cohorting is performed\" do\n experiment.enable_cohorting\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns false when nothing has been configured\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq true\n end\n end\n\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n end\n\nend\n it \"should only reset once\" do\n experiment.save\n expect(experiment.version).to eq(0)\n same_experiment = same_but_different_alternative\n expect(same_experiment.version).to eq(1)\n same_experiment_again = same_but_different_alternative\n expect(same_experiment_again.version).to eq(1)\n end\n\n context \"when metadata is changed\" do\n it \"should increase version\" do\n experiment.save\n experiment.metadata = { \"foo\" => \"bar\" }\n\n expect { experiment.save }.to change { experiment.version }.by(1)\n end\n\n it \"does not increase version\" do\n experiment.metadata = nil\n experiment.save\n expect { experiment.save }.to change { experiment.version }.by(0)\n end\n end\n\n context \"when experiment configuration is changed\" do\n let(:reset_manually) { false }\n\n before do\n experiment.save\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n green.increment_participation\n green.increment_participation\n experiment.set_alternatives_and_options(alternatives: %w(blue red green zip),\n goals: %w(purchase))\n experiment.save\n end\n\n it \"resets all alternatives\" do\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n context \"when reset_manually is set\" do\n let(:reset_manually) { true }\n\n 
it \"does not reset alternatives\" do\n expect(green.participant_count).to eq(2)\n expect(green.completed_count).to eq(0)\n end\n end\n end\n end\n\n describe \"alternatives passed as non-strings\" do\n it \"should throw an exception if an alternative is passed that is not a string\" do\n expect { Split::ExperimentCatalog.find_or_create(\"link_color\", :blue, :red) }.to raise_error(ArgumentError)\n expect { Split::ExperimentCatalog.find_or_create(\"link_enabled\", true, false) }.to raise_error(ArgumentError)\n end\n end\n\n describe \"specifying weights\" do\n let(:experiment_with_weight) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", { \"blue\" => 1 }, { \"red\" => 2 })\n }\n\n it \"should work for a new experiment\" do\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n\n it \"should work for an existing experiment\" do\n experiment.save\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n end\n\n describe \"specifying goals\" do\n let(:experiment) {\n new_experiment([\"purchase\"])\n }\n\n context \"saving experiment\" do\n let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\") }\n\n before { experiment.save }\n\n it \"can find existing experiment\" do\n expect(Split::ExperimentCatalog.find(\"link_color\").name).to eq(\"link_color\")\n end\n\n it \"should reset an experiment if it is loaded with different goals\" do\n same_but_different_goals\n expect(Split::ExperimentCatalog.find(\"link_color\").goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n it \"should have goals\" do\n expect(experiment.goals).to eq([\"purchase\"])\n end\n\n context \"find or create experiment\" do\n it \"should have correct goals\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([\"purchase\", 
\"refund\"])\n experiment = Split::ExperimentCatalog.find_or_create(\"link_color3\", \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([])\n end\n end\n end\n\n describe \"beta probability calculation\" do\n it \"should return a hash with the probability of each alternative being the best\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"mathematicians\", \"bernoulli\", \"poisson\", \"lagrange\")\n experiment.calc_winning_alternatives\n expect(experiment.alternative_probabilities).not_to be_nil\n end\n\n it \"should return between 46% and 54% probability for an experiment with 2 alternatives and no data\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"scientists\", \"einstein\", \"bohr\")\n experiment.calc_winning_alternatives\n expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50)\n end\n\n it \"should calculate the probability of being the winning alternative separately for each goal\", skip: true do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n goal1 = experiment.goals[0]\n goal2 = experiment.goals[1]\n experiment.alternatives.each do |alternative|\n alternative.participant_count = 50\n alternative.set_completed_count(10, goal1)\n alternative.set_completed_count(15+rand(30), goal2)\n end\n experiment.calc_winning_alternatives\n alt = experiment.alternatives[0]\n p_goal1 = alt.p_winner(goal1)\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n\n it \"should return nil and not re-calculate probabilities if they have already been calculated today\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.calc_winning_alternatives).not_to be nil\n expect(experiment.calc_winning_alternatives).to be nil\n end\n end\nend\n\n Fix caching of winning alternative to prevent recalculation 
each time the dashboard is loaded\n\nIt looks like when this feature was added, the calculation of winning\nalternatives was meant to take place only once per day.\n\nThe #calc_winning_alternatives method was never called, which was meant\nto be saving the experiment's last calc_time. Update the experiment view\nto call this method instead of the #estimate_winning_alternative method\ndirectly. Fix caching so that the #calc_time= method is called, rather\nthan assigning to a local variable. Update calc_time so that number of\ndays since epoch is stored, rather than the day of month (1-31). Ensure\nwe're comparing integer values, rather than the string value Redis\nreturns from #hget.\n\n @@ -428,6 +428,13 @@ describe Split::Experiment do\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n+\n+ it \"should return nil and not re-calculate probabilities if they have already been calculated today\" do\n+ experiment = Split::ExperimentCatalog.find_or_create({'link_color3' => [\"purchase\", \"refund\"]}, 'blue', 'red', 'green')\n+ experiment_calc_time = Time.now.utc.to_i / 86400\n+ experiment.calc_time = experiment_calc_time\n+ expect(experiment.calc_winning_alternatives).to be nil\n+ end\n end\n \n end\n"},"addition_count":{"kind":"number","value":7,"string":"7"},"commit_subject":{"kind":"string","value":"Fix caching of winning alternative to prevent recalculation each time the dashboard is loaded"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675146,"cells":{"id":{"kind":"string","value":"10070796"},"text":{"kind":"string","value":" _controls.erb\n <% if experiment.has_winner? %>\n
\" method='post' onclick=\"return confirmReopen()\">\n \n
\n<% else %>\n <% if experiment.cohorting_disabled? %>\n
\" method='post' onclick=\"return confirmEnableCohorting()\">\n \n \n
\n <% else %>\n \n \n<% end %>\n
\" method='post' onclick=\"return confirmDelete()\">\n \n \n
\n<% if experiment.start_time %>\n
\" method='post' onclick=\"return confirmReset()\">\n \n
\n<% else%>\n
\" method='post'>\n \n
\n<% end %>\n
\" method='post' onclick=\"return confirmDelete()\">\n \n \n
\n\n Merge pull request #352 from craigmcnamara/fix-delete-path\n\nWhoops. Forgot to update the delete path.\n @@ -12,7 +12,7 @@\n \n \n <% end %>\n-
\" method='post' onclick=\"return confirmDelete()\">\n+\" method='post' onclick=\"return confirmDelete()\">\n \n \n
\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #352 from craigmcnamara/fix-delete-path"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".erb"},"lang":{"kind":"string","value":"erb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675147,"cells":{"id":{"kind":"string","value":"10070797"},"text":{"kind":"string","value":" _controls.erb\n <% if experiment.has_winner? %>\n
\" method='post' onclick=\"return confirmReopen()\">\n \n
\n<% else %>\n <% if experiment.cohorting_disabled? %>\n
\" method='post' onclick=\"return confirmEnableCohorting()\">\n \n \n
\n <% else %>\n \n \n<% end %>\n
\" method='post' onclick=\"return confirmDelete()\">\n \n \n
\n<% if experiment.start_time %>\n
\" method='post' onclick=\"return confirmReset()\">\n \n
\n<% else%>\n
\" method='post'>\n \n
\n<% end %>\n
\" method='post' onclick=\"return confirmDelete()\">\n \n \n
\n\n Merge pull request #352 from craigmcnamara/fix-delete-path\n\nWhoops. Forgot to update the delete path.\n @@ -12,7 +12,7 @@\n \n \n <% end %>\n-
\" method='post' onclick=\"return confirmDelete()\">\n+\" method='post' onclick=\"return confirmDelete()\">\n \n \n
\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #352 from craigmcnamara/fix-delete-path"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".erb"},"lang":{"kind":"string","value":"erb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675148,"cells":{"id":{"kind":"string","value":"10070798"},"text":{"kind":"string","value":" _controls.erb\n <% if experiment.has_winner? %>\n
\" method='post' onclick=\"return confirmReopen()\">\n \n
\n<% else %>\n <% if experiment.cohorting_disabled? %>\n
\" method='post' onclick=\"return confirmEnableCohorting()\">\n \n \n
\n <% else %>\n \n \n<% end %>\n
\" method='post' onclick=\"return confirmDelete()\">\n \n \n
\n<% if experiment.start_time %>\n
\" method='post' onclick=\"return confirmReset()\">\n \n
\n<% else%>\n
\" method='post'>\n \n
\n<% end %>\n
\" method='post' onclick=\"return confirmDelete()\">\n \n \n
\n\n Merge pull request #352 from craigmcnamara/fix-delete-path\n\nWhoops. Forgot to update the delete path.\n @@ -12,7 +12,7 @@\n \n \n <% end %>\n-
\" method='post' onclick=\"return confirmDelete()\">\n+\" method='post' onclick=\"return confirmDelete()\">\n \n \n
\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #352 from craigmcnamara/fix-delete-path"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".erb"},"lang":{"kind":"string","value":"erb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675149,"cells":{"id":{"kind":"string","value":"10070799"},"text":{"kind":"string","value":" CHANGELOG.md\n ## 3.4.0 (November 9th, 2019)\n\nFeatures:\n- Force experiment does not count for metrics (@andrehjr, #637)\n- Fix cleanup_old_versions! misbehaviour (@serggl, #661)\n\nFeatures:\n- Make goals accessible via on_trial_complete callbacks (@robin-phung, #625)\n- Replace usage of SimpleRandom with RubyStats(Used for Beta Distribution RNG) (@andrehjr, #616)\n- Introduce enable/disable experiment cohorting (@robin-phung, #615)\n- Add on_experiment_winner_choose callback (@GenaMinenkov, #574)\n- Add Split::Cache to reduce load on Redis (@rdh, #648)\n- Caching based optimization in the experiment#save path (@amangup, #652)\n- Adds config option for cookie domain (@joedelia, #664)\n\nMisc:\n- Drop support for Ruby < 2.5 (@andrehjr, #627)\n- Drop support for Rails < 5 (@andrehjr, #607)\n- Bump minimum required redis to 4.2 (@andrehjr, #628)\n- Removed repeated loading from config (@robin-phung, #619)\n- Simplify RedisInterface usage when persisting Experiment alternatives (@andrehjr, #632)\n- Remove redis_url impl. Deprecated on version 2.2 (@andrehjr, #631)\n- Remove thread_safe config as redis-rb is thread_safe by default (@andrehjr, #630)\n- Fix typo of in `Split::Trial` class variable (TomasBarry, #644)\n- Single HSET to update values, instead of multiple ones (@andrehjr, #640)\n- Use Redis#hmset to keep compatibility with Redis < 4.0 (@andrehjr, #659)\n- Remove 'set' parsing for alternatives. 
Sets were used as storage and deprecated on 0.x (@andrehjr, #639)\n- Adding documentation related to what is stored on cookies. (@andrehjr, #634)\n- Keep railtie defined under the Split gem namespace (@avit, #666)\n- Update RSpec helper to support block syntax (@clowder, #665)\n\n## 3.4.1 (November 12th, 2019)\n\nBugfixes:\n- Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602)\n\n## 3.4.0 (November 9th, 2019)\n\nFeatures:\n- Improve DualAdapter (@santib, #588), adds a new configuration for the DualAdapter, making it possible to keep consistency for logged_out/logged_in users. It's a opt-in flag. No Behavior was changed on this release.\n- Make dashboard pagination default \"per\" param configurable (@alopatin, #597)\n\nBugfixes:\n- Fix `force_alternative` for experiments with incremented version (@giraffate, #568)\n- Persist alternative weights (@giraffate, #570)\n- Combined experiment performance improvements (@gnanou, #575)\n- Handle correctly case when ab_finished is called before ab_test for a user (@gnanou, #577)\n- When loading active_experiments, it should not look into user's 'finished' keys (@andrehjr, #582)\n\nMisc:\n- Remove `rubyforge_project` from gemspec (@giraffate, #583)\n- Fix URLs to replace http with https (@giraffate , #584)\n- Lazily include split helpers in ActionController::Base (@hasghari, #586)\n- Fix unused variable warnings (@andrehjr, #592)\n- Fix ruby warnings (@andrehjr, #593)\n- Update rubocop.yml config (@andrehjr, #594)\n- Add frozen_string_literal to all files that were missing it (@andrehjr, #595)\n\n## 3.3.2 (April 12th, 2019)\n\nFeatures:\n- Added uptime robot to configuration.rb (@razel1982, #556)\n- Check to see if being run in Rails application and run in before_initialize (@husteadrobert, #555)\n\nBugfixes:\n- Fix error message interpolation (@hanibash, #553)\n- Fix Bigdecimal warnings (@agraves, #551)\n- Avoid hitting up on redis for robots/excluded 
users. (@andrehjr, #544)\n- Checks for defined?(request) on Helper#exclude_visitor?. (@andrehjr)\n\nMisc:\n- Update travis to add Rails 6 (@edmilton, #559)\n- Fix broken specs in development environment (@dougpetronilio, #557)\n\n## 3.3.1 (January 11th, 2019)\n\nFeatures:\n- Filter some more bots (@janosch-x, #542)\n\nBugfixes:\n- Fix Dashboard Pagination Helper typo (@cattekin, #541)\n- Do not store alternative in cookie if experiment has a winner (@sadhu89, #539)\n- fix user participating alternative not found (@NaturalHokke, #536)\n\nMisc:\n- Tweak RSpec instructions (@eliotsykes, #540)\n- Improve README regarding rspec usage (@vermaxik, #538)\n\n## 3.3.0 (August 13th, 2018)\n\nFeatures:\n\n- Added pagination for dashboard (@GeorgeGorbanev, #518)\n- Add Facebot crawler to list of bots (@pfeiffer, #530)\n- Ignore previewing requests (@pfeiffer, #531)\n- Fix binding of ignore_filter (@pfeiffer, #533)\n\nBugfixes:\n\n- Fix cookie header duplication (@andrehjr, #522)\n\nPerformance:\n\n- Improve performance of RedisInterface#make_list_length by using LTRIM command (@mlovic, #509)\n\nMisc:\n\n- Update development dependencies\n- test rails 5.2 on travis (@lostapathy, #524)\n- update ruby versions for travis (@lostapathy, #525)\n\n## 3.2.0 (September 21st, 2017)\n\nFeatures:\n\n- Allow configuration of how often winning alternatives are recalculated (@patbl, #501)\n\nBugfixes:\n\n- Avoid z_score numeric exception for conversion rates >1 (@cmantas, #503)\n- Fix combined experiments (@semanticart, #502)\n\n## 3.1.1 (August 30th, 2017)\n\nBugfixes:\n\n- Bring back support for ruby 1.9.3 and greater (rubygems 2.0.0 or greater now required) (@patbl, #498)\n\nMisc:\n\n- Document testing with RSpec (@eliotsykes, #495)\n\n## 3.1.0 (August 14th, 2017)\n\nFeatures:\n\n- Support for combined experiments (@daviddening, #493)\n- Rewrite CookieAdapter to work with Rack::Request and Rack::Response directly (@andrehjr, #490)\n- Enumeration of a User's Experiments that Respects the 
db_failover Option (@MarkRoddy, #487)\n\nBugfixes:\n\n- Blocked a few more common bot user agents (@kylerippey, #485)\n\nMisc:\n\n- Repository Audit by Maintainer.io (@RichardLitt, #484)\n- Update development dependencies\n- Test on ruby 2.4.1\n- Test compatibility with rails 5.1\n- Add uris to metadata section in gemspec\n\n## 3.0.0 (March 30th, 2017)\n\nFeatures:\n\n- added block randomization algorithm and specs (@hulleywood, #475)\n- Add ab_record_extra_info to allow record extra info to alternative and display on dashboard. (@tranngocsam, #460)\n\nBugfixes:\n\n- Avoid crashing on Ruby 2.4 for numeric strings (@flori, #470)\n- Fix issue where redis isn't required (@tomciopp, #466)\n\nMisc:\n\n- Avoid variable_size_secure_compare private method (@eliotsykes, #465)\n\n## 2.2.0 (November 11th, 2016)\n\n**Backwards incompatible!** Redis keys are renamed. Please make sure all running tests are completed before you upgrade, as they will reset.\n\nFeatures:\n\n- Remove dependency on Redis::Namespace (@bschaeffer, #425)\n- Make resetting on experiment change optional (@moggyboy, #430)\n- Add ability to force alternative on dashboard (@ccallebs, #437)\n\nBugfixes:\n\n- Fix variations reset across page loads for multiple=control and improve coverage (@Vasfed, #432)\n\nMisc:\n\n- Remove Explicit Return (@BradHudson, #441)\n- Update Redis config docs (@bschaeffer, #422)\n- Harden HTTP Basic snippet against timing attacks (@eliotsykes, #443)\n- Removed a couple old ruby 1.8 hacks (@andrew, #456)\n- Run tests on rails 5 (@andrew, #457)\n- Fixed a few codeclimate warnings (@andrew, #458)\n- Use codeclimate for test coverage (@andrew, #455)\n\n## 2.1.0 (August 8th, 2016)\n\nFeatures:\n\n- Support REDIS_PROVIDER variable used in Heroku (@kartikluke, #426)\n\n## 2.0.0 (July 17th, 2016)\n\nBreaking changes:\n\n- Removed deprecated `finished` and `begin_experiment` methods\n- Namespaced override param to avoid potential clashes (@henrik, #398)\n\n## 1.7.0 (June 28th, 
2016)\n\nFeatures:\n\n- Running concurrent experiments on same endpoint/view (@karmakaze, #421)\n\n## 1.6.0 (June 16th, 2016)\n\nFeatures:\n\n- Add Dual Redis(logged-in)/cookie(logged-out) persistence adapter (@karmakaze, #420)\n\n## 1.5.0 (June 8th, 2016)\n\nFeatures:\n\n- Add `expire_seconds:` TTL option to RedisAdapter (@karmakaze, #409)\n- Optional custom persistence adapter (@ndelage, #411)\n\nMisc:\n\n- Use fakeredis for testing (@andrew, #412)\n\n## 1.4.5 (June 7th, 2016)\n\nBugfixes:\n\n- FIX Negative numbers on non-finished (@divineforest, #408)\n- Eliminate extra RedisAdapter hget (@karmakaze, #407)\n- Remove unecessary code from Experiment class (@pakallis, #391, #392, #393)\n\nMisc:\n\n- Simplify Configuration#normalized_experiments (@pakallis, #395)\n- Clarify test running instructions (@henrik, #397)\n\n## 1.4.4 (May 9th, 2016)\n\nBugfixes:\n\n- Increment participation if store override is true and no experiment key exists (@spheric, #380)\n\nMisc:\n\n- Deprecated `finished` method in favour of `ab_finished` (@andreibondarev, #389)\n- Added minimum version requirement to simple-random\n- Clarify finished with first option being a hash in Readme (@henrik, #382)\n- Refactoring the User abstraction (@andreibondarev, #384)\n\n## 1.4.3 (April 28th, 2016)\n\nFeatures:\n\n- add on_trial callback whenever a trial is started (@mtyeh411, #375)\n\nBugfixes:\n\n- Allow algorithm configuration at experiment level (@007sumit, #376)\n\nMisc:\n\n- only choose override if it exists as valid alternative (@spheric, #377)\n\n## 1.4.2 (April 25th, 2016)\n\nMisc:\n\n- Deprecated some legacy methods (@andreibondarev, #374)\n\n## 1.4.1 (April 21st, 2016)\n\nBugfixes:\n\n- respect manual start configuration after an experiment has been deleted (@mtyeh411, #372)\n\nMisc:\n\n- Introduce goals collection to reduce complexity of Experiment#save (@pakallis, #365)\n- Revise specs according to http://betterspecs.org/ (@hkliya, #369)\n\n## 1.4.0 (April 2nd, 2016)\n\nFeatures:\n\n- 
Added experiment filters to dashboard (@ccallebs, #363, #364)\n- Added Contributor Covenant Code of Conduct\n\n## 1.3.2 (January 2nd, 2016)\n\nBugfixes:\n\n- Fix deleting experiments in from the updated dashboard (@craigmcnamara, #352)\n\n## 1.3.1 (January 1st, 2016)\n\nBugfixes:\n\n- Fix the dashboard for experiments with ‘/‘ in the name. (@craigmcnamara, #349)\n\n## 1.3.0 (October 20th, 2015)\n\nFeatures:\n\n - allow for custom redis_url different from ENV variable (@davidgrieser, #323)\n - add ability to change the length of the persistence cookie (@peterylai, #335)\n\nBugfixes:\n\n - Rescue from Redis::BaseError instead of Redis::CannotConnectError (@nfm, #342)\n - Fix active experiments when experiment is on a later version (@ndrisso, #331)\n - Fix caching of winning alternative (@nfm, #329)\n\nMisc:\n\n - Remove duplication from Experiment#save (@pakallis, #333)\n - Remove unnecessary argument from Experiment#write_to_alternative (@t4deu, #332)\n\n## 1.2.1 (May 17th, 2015)\n\nFeatures:\n\n - Handle redis DNS resolution failures gracefully (@fusion2004, #310)\n - Push metadata to ab_test block (@ekorneeff, #296)\n - Helper methods are now private when included in controllers (@ipoval, #303)\n\nBugfixes:\n\n - Return an empty hash as metadata when Split is disabled (@tomasdundacek, #313)\n - Don't use capture helper from ActionView (@tomasdundacek, #312)\n\nMisc:\n\n - Remove body \"max-width\" from dashboard (@xicreative, #299)\n - fix private for class methods (@ipoval, #301)\n - minor memoization fix in spec (@ipoval, #304)\n - Minor documentation fixes (#295, #297, #305, #308)\n\n## 1.2.0 (January 24th, 2015)\n\nFeatures:\n\n - Configure redis using environment variables if available (@saratovsource , #293)\n - Store metadata on experiment configuration (@dekz, #291)\n\nBugfixes:\n\n - Revert the Trial#complete! 
public API to support noargs (@dekz, #292)\n\n## 1.1.0 (January 9th, 2015)\n\nChanges:\n\n - Public class methods on `Split::Experiment` (e.g., `find_or_create`)\n have been moved to `Split::ExperimentCatalog`.\n\nFeatures:\n\n - Decouple trial from Split::Helper (@joshdover, #286)\n - Helper method for Active Experiments (@blahblahblah-, #273)\n\nMisc:\n\n - Use the new travis container based infrastructure for tests (@andrew, #280)\n\n## 1.0.0 (October 12th, 2014)\n\nChanges:\n\n - Remove support for Ruby 1.8.7 and Rails 2.3 (@qpowell, #271)\n\n## 0.8.0 (September 25th, 2014)\n\nFeatures:\n\n - Added new way to calculate the probability an alternative is the winner (@caser, #266, #251)\n - support multiple metrics per experiment (@stevenou, #260)\n\nBugfixes:\n\n - Avoiding call to params in EncapsulatedHelper (@afn, #257)\n\n## 0.7.3 (September 16th, 2014)\n\nFeatures:\n\n - Disable all split tests via a URL parameter (@hwartig, #263)\n\nBugfixes:\n\n - Correctly escape experiment names on dashboard (@ecaron, #265)\n - Handle redis connection exception error properly (@andrew, #245)\n\n## 0.7.2 (June 12th, 2014)\n\nFeatures:\n\n - Show metrics on the dashboard (@swrobel, #241)\n\nBugfixes:\n\n - Avoid nil error with ExperimentCatalog when upgrading (@danielschwartz, #253)\n - [SECURITY ISSUE] Only allow known alternatives as query param overrides (@ankane, #255)\n\n## 0.7.1 (March 20th, 2014)\n\nFeatures:\n\n - You can now reopen experiment from the dashboard (@mikezaby, #235)\n\nMisc:\n\n - Internal code tidy up (@IanVaughan, #238)\n\n## 0.7.0 (December 26th, 2013)\n\nFeatures:\n\n - Significantly improved z-score algorithm (@caser ,#221)\n - Better sorting of Experiments on dashboard (@wadako111, #218)\n\nBugfixes:\n\n - Fixed start button not being displayed in some cases (@vigosan, #219)\n\nMisc:\n\n - Experiment#initialize refactoring (@nberger, #224)\n - Extract ExperimentStore into a seperate class (@nberger, #225)\n\n## 0.6.6 (October 15th, 
2013)\n\nFeatures:\n\n - Sort experiments on Dashboard so \"active\" ones without a winner appear first (@swrobel, #204)\n - Starting tests manually (@duksis, #209)\n\nBugfixes:\n\n - Only trigger completion callback with valid Trial (@segfaultAX, #208)\n - Fixed bug with `resettable` when using `normalize_experiments` (@jonashuckestein, #213)\n\nMisc:\n\n - Added more bots to filter list (@lbeder, #214, #215, #216)\n\n## 0.6.5 (August 23, 2013)\n\nFeatures:\n\n - Added Redis adapter for persisting experiments across sessions (@fengb, #203)\n\nMisc:\n\n - Expand upon algorithms section in README (@swrobel, #200)\n\n## 0.6.4 (August 8, 2013)\n\nFeatures:\n\n - Add hooks for experiment deletion and resetting (@craigmcnamara, #198)\n - Allow Split::Helper to be used outside of a controller (@nfm, #190)\n - Show current Rails/Rack Env in dashboard (@rceee, #187)\n\nBugfixes:\n\n - Fix whiplash algorithm when using goals (@swrobel, #193)\n\nMisc:\n\n - Refactor dashboard js (@buddhamagnet)\n\n## 0.6.3 (July 8, 2013)\n\nFeatures:\n\n - Add hooks for Trial#choose! and Trial#complete! 
(@bmarini, #176)\n\nBugfixes:\n\n - Stores and parses Experiment's start_time as a UNIX integer (@joeroot, #177)\n\n## 0.6.2 (June 6, 2013)\n\nFeatures:\n\n - Rails 2.3 compatibility (@bhcarpenter, #167)\n - Adding possibility to store overridden alternative (@duksis, #173)\n\nMisc:\n\n - Now testing against multiple versions of rails\n\n## 0.6.1 (May 4, 2013)\n\nBugfixes:\n\n - Use the specified algorithm for the experiment instead of the default (@woodhull, #165)\n\nMisc:\n\n - Ensure experiements are valid when configuring (@ashmckenzie, #159)\n - Allow arrays to be passed to ab_test (@fenelon, #156)\n\n## 0.6.0 (April 4, 2013)\n\nFeatures:\n\n - Support for Ruby 2.0.0 (@phoet, #142)\n - Multiple Goals (@liujin, #109)\n - Ignoring IPs using Regular Expressions (@waynemoore, #119)\n - Added ability to add more bots to the default list (@themgt, #140)\n - Allow custom configuration of user blocking logic (@phoet , #148)\n\nBugfixes:\n\n - Fixed regression in handling of config files (@iangreenleaf, #115)\n - Fixed completion rate increases for experiments users aren't participating in (@philnash, #67)\n - Handle exceptions from invalid JSON in cookies (@iangreenleaf, #126)\n\nMisc:\n\n - updated minimum json version requirement\n - Refactor Yaml Configuration (@rtwomey, #124)\n - Refactoring of Experiments (@iangreenleaf @tamird, #117 #118)\n - Added more known Bots, including Pingdom, Bing, YandexBot (@julesie, @zinkkrysty, @dimko)\n - Improved Readme (@iangreenleaf @phoet)\n\n## 0.5.0 (January 28, 2013)\n\nFeatures:\n\n - Persistence Adapters: Cookies and Session (@patbenatar, #98)\n - Configure experiments from a hash (@iangreenleaf, #97)\n - Pluggable sampling algorithms (@woodhull, #105)\n\nBugfixes:\n\n - Fixed negative number of non-finished rates (@philnash, #83)\n - Fixed behaviour of finished(:reset => false) (@philnash, #88)\n - Only take into consideration positive z-scores (@thomasmaas, #96)\n - Amended ab_test method to raise ArgumentError if passed 
integers or symbols as\n alternatives (@buddhamagnet, #81)\n\n## 0.4.6 (October 28, 2012)\n\nFeatures:\n\n - General code quality improvements (@buddhamagnet, #79)\n\nBugfixes:\n\n - Don't increment the experiment counter if user has finished (@dimko, #78)\n - Fixed an incorrect test (@jaywengrow, #74)\n\n## 0.4.5 (August 30, 2012)\n\nBugfixes:\n\n - Fixed header gradient in FF/Opera (@philnash, #69)\n - Fixed reseting of experiment in session (@apsoto, #43)\n\n## 0.4.4 (August 9, 2012)\n\nFeatures:\n\n - Allow parameter overrides, even without Redis. (@bhcarpenter, #62)\n\nBugfixes:\n\n - Fixes version number always increasing when alternatives are changed (@philnash, #63)\n - updated guard-rspec to version 1.2\n\n## 0.4.3 (July 8, 2012)\n\nFeatures:\n\n - redis failover now recovers from all redis-related exceptions\n\n## 0.4.2 (June 1, 2012)\n\nFeatures:\n\n - Now works with v3.0 of redis gem\n\nBugfixes:\n\n - Fixed redis failover on Rubinius\n\n## 0.4.1 (April 6, 2012)\n\nFeatures:\n\n - Added configuration option to disable Split testing (@ilyakatz, #45)\n\nBugfixes:\n\n - Fix weights for existing experiments (@andreas, #40)\n - Fixed dashboard range error (@andrew, #42)\n\n## 0.4.0 (March 7, 2012)\n\n**IMPORTANT**\n\nIf using ruby 1.8.x and weighted alternatives you should always pass the control alternative through as the second argument with any other alternatives as a third argument because the order of the hash is not preserved in ruby 1.8, ruby 1.9 users are not affected by this bug.\n\nFeatures:\n\n - Experiments now record when they were started (@vrish88, #35)\n - Old versions of experiments in sessions are now cleaned up\n - Avoid users participating in multiple experiments at once (#21)\n\nBugfixes:\n\n - Overriding alternatives doesn't work for weighted alternatives (@layflags, #34)\n - confidence_level helper should handle tiny z-scores (#23)\n\n## 0.3.3 (February 16, 2012)\n\nBugfixes:\n\n - Fixed redis failover when a block was passed to 
ab_test (@layflags, #33)\n\n## 0.3.2 (February 12, 2012)\n\nFeatures:\n\n - Handle redis errors gracefully (@layflags, #32)\n\n## 0.3.1 (November 19, 2011)\n\nFeatures:\n\n - General code tidy up (@ryanlecompte, #22, @mocoso, #28)\n - Lazy loading data from Redis (@lautis, #25)\n\nBugfixes:\n\n - Handle unstarted experiments (@mocoso, #27)\n - Relaxed Sinatra version requirement (@martinclu, #24)\n\n\n## 0.3.0 (October 9, 2011)\n\nFeatures:\n\n - Redesigned dashboard (@mrappleton, #17)\n - Use atomic increments in redis for better concurrency (@lautis, #18)\n - Weighted alternatives\n\nBugfixes:\n\n - Fix to allow overriding of experiments that aren't on version 1\n\n\n## 0.2.4 (July 18, 2011)\n\nFeatures:\n\n - Added option to finished to not reset the users session\n\nBugfixes:\n\n - Only allow strings as alternatives, fixes strange errors when passing true/false or symbols\n\n## 0.2.3 (June 26, 2011)\n\nFeatures:\n\n - Experiments can now be deleted from the dashboard\n - ab_test helper now accepts a block\n - Improved dashboard\n\nBugfixes:\n\n - After resetting an experiment, existing users of that experiment will also be reset\n\n## 0.2.2 (June 11, 2011)\n\nFeatures:\n\n - Updated redis-namespace requirement to 1.0.3\n - Added a configuration object for changing options\n - Robot regex can now be changed via a configuration options\n - Added ability to ignore visits from specified IP addresses\n - Dashboard now shows percentage improvement of alternatives compared to the control\n - If the alternatives of an experiment are changed it resets the experiment and uses the new alternatives\n\nBugfixes:\n\n - Saving an experiment multiple times no longer creates duplicate alternatives\n\n## 0.2.1 (May 29, 2011)\n\nBugfixes:\n\n - Convert legacy sets to lists to avoid exceptions during upgrades from 0.1.x\n\n## 0.2.0 (May 29, 2011)\n\nFeatures:\n\n - Override an alternative via a url parameter\n - Experiments can now be reset from the dashboard\n - The first 
alternative is now considered the control\n - General dashboard usability improvements\n - Robots are ignored and given the control alternative\n\nBugfixes:\n\n - Alternatives are now store in a list rather than a set to ensure consistent ordering\n - Fixed diving by zero errors\n\n## 0.1.1 (May 18, 2011)\n\nBugfixes:\n\n - More Robust conversion rate display on dashboard\n - Ensure `Split::Version` is available everywhere, fixed dashboard\n\n## 0.1.0 (May 17, 2011)\n\nInitial Release\n\n Add Changelog\n\n @@ -1,3 +1,8 @@\n+## 3.4.1 (November 12th, 2019)\n+\n+Bugfixes:\n+- Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602)\n+\n ## 3.4.0 (November 9th, 2019)\n \n Features:\n"},"addition_count":{"kind":"number","value":5,"string":"5"},"commit_subject":{"kind":"string","value":"Add Changelog"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675150,"cells":{"id":{"kind":"string","value":"10070800"},"text":{"kind":"string","value":" CHANGELOG.md\n ## 3.4.0 (November 9th, 2019)\n\nFeatures:\n- Force experiment does not count for metrics (@andrehjr, #637)\n- Fix cleanup_old_versions! 
misbehaviour (@serggl, #661)\n\nFeatures:\n- Make goals accessible via on_trial_complete callbacks (@robin-phung, #625)\n- Replace usage of SimpleRandom with RubyStats(Used for Beta Distribution RNG) (@andrehjr, #616)\n- Introduce enable/disable experiment cohorting (@robin-phung, #615)\n- Add on_experiment_winner_choose callback (@GenaMinenkov, #574)\n- Add Split::Cache to reduce load on Redis (@rdh, #648)\n- Caching based optimization in the experiment#save path (@amangup, #652)\n- Adds config option for cookie domain (@joedelia, #664)\n\nMisc:\n- Drop support for Ruby < 2.5 (@andrehjr, #627)\n- Drop support for Rails < 5 (@andrehjr, #607)\n- Bump minimum required redis to 4.2 (@andrehjr, #628)\n- Removed repeated loading from config (@robin-phung, #619)\n- Simplify RedisInterface usage when persisting Experiment alternatives (@andrehjr, #632)\n- Remove redis_url impl. Deprecated on version 2.2 (@andrehjr, #631)\n- Remove thread_safe config as redis-rb is thread_safe by default (@andrehjr, #630)\n- Fix typo of in `Split::Trial` class variable (TomasBarry, #644)\n- Single HSET to update values, instead of multiple ones (@andrehjr, #640)\n- Use Redis#hmset to keep compatibility with Redis < 4.0 (@andrehjr, #659)\n- Remove 'set' parsing for alternatives. Sets were used as storage and deprecated on 0.x (@andrehjr, #639)\n- Adding documentation related to what is stored on cookies. (@andrehjr, #634)\n- Keep railtie defined under the Split gem namespace (@avit, #666)\n- Update RSpec helper to support block syntax (@clowder, #665)\n\n## 3.4.1 (November 12th, 2019)\n\nBugfixes:\n- Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602)\n\n## 3.4.0 (November 9th, 2019)\n\nFeatures:\n- Improve DualAdapter (@santib, #588), adds a new configuration for the DualAdapter, making it possible to keep consistency for logged_out/logged_in users. It's a opt-in flag. 
No Behavior was changed on this release.\n- Make dashboard pagination default \"per\" param configurable (@alopatin, #597)\n\nBugfixes:\n- Fix `force_alternative` for experiments with incremented version (@giraffate, #568)\n- Persist alternative weights (@giraffate, #570)\n- Combined experiment performance improvements (@gnanou, #575)\n- Handle correctly case when ab_finished is called before ab_test for a user (@gnanou, #577)\n- When loading active_experiments, it should not look into user's 'finished' keys (@andrehjr, #582)\n\nMisc:\n- Remove `rubyforge_project` from gemspec (@giraffate, #583)\n- Fix URLs to replace http with https (@giraffate , #584)\n- Lazily include split helpers in ActionController::Base (@hasghari, #586)\n- Fix unused variable warnings (@andrehjr, #592)\n- Fix ruby warnings (@andrehjr, #593)\n- Update rubocop.yml config (@andrehjr, #594)\n- Add frozen_string_literal to all files that were missing it (@andrehjr, #595)\n\n## 3.3.2 (April 12th, 2019)\n\nFeatures:\n- Added uptime robot to configuration.rb (@razel1982, #556)\n- Check to see if being run in Rails application and run in before_initialize (@husteadrobert, #555)\n\nBugfixes:\n- Fix error message interpolation (@hanibash, #553)\n- Fix Bigdecimal warnings (@agraves, #551)\n- Avoid hitting up on redis for robots/excluded users. (@andrehjr, #544)\n- Checks for defined?(request) on Helper#exclude_visitor?. 
(@andrehjr)\n\nMisc:\n- Update travis to add Rails 6 (@edmilton, #559)\n- Fix broken specs in developement environment (@dougpetronilio, #557)\n\n## 3.3.1 (January 11th, 2019)\n\nFeatures:\n- Filter some more bots (@janosch-x, #542)\n\nBugfixes:\n- Fix Dashboard Pagination Helper typo (@cattekin, #541)\n- Do not storage alternative in cookie if experiment has a winner (@sadhu89, #539)\n- fix user participating alternative not found (@NaturalHokke, #536)\n\nMisc:\n- Tweak RSpec instructions (@eliotsykes, #540)\n- Improve README regarding rspec usage (@vermaxik, #538)\n\n## 3.3.0 (August 13th, 2018)\n\nFeatures:\n\n- Added pagination for dashboard (@GeorgeGorbanev, #518)\n- Add Facebot crawler to list of bots (@pfeiffer, #530)\n- Ignore previewing requests (@pfeiffer, #531)\n- Fix binding of ignore_filter (@pfeiffer, #533)\n\nBugfixes:\n\n- Fix cookie header duplication (@andrehjr, #522)\n\nPerformance:\n\n- Improve performance of RedisInterface#make_list_length by using LTRIM command (@mlovic, #509)\n\nMisc:\n\n- Update development dependencies\n- test rails 5.2 on travis (@lostapathy, #524)\n- update ruby versions for travis (@lostapathy, #525)\n\n## 3.2.0 (September 21st, 2017)\n\nFeatures:\n\n- Allow configuration of how often winning alternatives are recalculated (@patbl, #501)\n\nBugfixes:\n\n- Avoid z_score numeric exception for conversion rates >1 (@cmantas, #503)\n- Fix combined experiments (@semanticart, #502)\n\n## 3.1.1 (August 30th, 2017)\n\nBugfixes:\n\n- Bring back support for ruby 1.9.3 and greater (rubygems 2.0.0 or greater now required) (@patbl, #498)\n\nMisc:\n\n- Document testing with RSpec (@eliotsykes, #495)\n\n## 3.1.0 (August 14th, 2017)\n\nFeatures:\n\n- Support for combined experiments (@daviddening, #493)\n- Rewrite CookieAdapter to work with Rack::Request and Rack::Response directly (@andrehjr, #490)\n- Enumeration of a User's Experiments that Respects the db_failover Option(@MarkRoddy, #487)\n\nBugfixes:\n\n- Blocked a few more common bot 
user agents (@kylerippey, #485)\n\nMisc:\n\n- Repository Audit by Maintainer.io (@RichardLitt, #484)\n- Update development dependencies\n- Test on ruby 2.4.1\n- Test compatibility with rails 5.1\n- Add uris to metadata section in gemspec\n\n## 3.0.0 (March 30th, 2017)\n\nFeatures:\n\n- added block randomization algorithm and specs (@hulleywood, #475)\n- Add ab_record_extra_info to allow record extra info to alternative and display on dashboard. (@tranngocsam, #460)\n\nBugfixes:\n\n- Avoid crashing on Ruby 2.4 for numeric strings (@flori, #470)\n- Fix issue where redis isn't required (@tomciopp , #466)\n\nMisc:\n\n- Avoid variable_size_secure_compare private method (@eliotsykes, #465)\n\n## 2.2.0 (November 11th, 2016)\n\n**Backwards incompatible!** Redis keys are renamed. Please make sure all running tests are completed before you upgrade, as they will reset.\n\nFeatures:\n\n- Remove dependency on Redis::Namespace (@bschaeffer, #425)\n- Make resetting on experiment change optional (@moggyboy, #430)\n- Add ability to force alternative on dashboard (@ccallebs, #437)\n\nBugfixes:\n\n- Fix variations reset across page loads for multiple=control and improve coverage (@Vasfed, #432)\n\nMisc:\n\n- Remove Explicit Return (@BradHudson, #441)\n- Update Redis config docs (@bschaeffer, #422)\n- Harden HTTP Basic snippet against timing attacks (@eliotsykes, #443)\n- Removed a couple old ruby 1.8 hacks (@andrew, #456)\n- Run tests on rails 5 (@andrew, #457)\n- Fixed a few codeclimate warnings (@andrew, #458)\n- Use codeclimate for test coverage (@andrew #455)\n\n## 2.1.0 (August 8th, 2016)\n\nFeatures:\n\n- Support REDIS_PROVIDER variable used in Heroku (@kartikluke, #426)\n\n## 2.0.0 (July 17th, 2016)\n\nBreaking changes:\n\n- Removed deprecated `finished` and `begin_experiment` methods\n- Namespaced override param to avoid potential clashes (@henrik, #398)\n\n## 1.7.0 (June 28th, 2016)\n\nFeatures:\n\n- Running concurrent experiments on same endpoint/view (@karmakaze, 
#421)\n\n## 1.6.0 (June 16th, 2016)\n\nFeatures:\n\n- Add Dual Redis(logged-in)/cookie(logged-out) persistence adapter (@karmakaze, #420)\n\n## 1.5.0 (June 8th, 2016)\n\nFeatures:\n\n- Add `expire_seconds:` TTL option to RedisAdapter (@karmakaze, #409)\n- Optional custom persistence adapter (@ndelage, #411)\n\nMisc:\n\n- Use fakeredis for testing (@andrew, #412)\n\n## 1.4.5 (June 7th, 2016)\n\nBugfixes:\n\n- FIX Negative numbers on non-finished (@divineforest, #408)\n- Eliminate extra RedisAdapter hget (@karmakaze, #407)\n- Remove unecessary code from Experiment class (@pakallis, #391, #392, #393)\n\nMisc:\n\n- Simplify Configuration#normalized_experiments (@pakallis, #395)\n- Clarify test running instructions (@henrik, #397)\n\n## 1.4.4 (May 9th, 2016)\n\nBugfixes:\n\n- Increment participation if store override is true and no experiment key exists (@spheric, #380)\n\nMisc:\n\n- Deprecated `finished` method in favour of `ab_finished` (@andreibondarev, #389)\n- Added minimum version requirement to simple-random\n- Clarify finished with first option being a hash in Readme (@henrik, #382)\n- Refactoring the User abstraction (@andreibondarev, #384)\n\n## 1.4.3 (April 28th, 2016)\n\nFeatures:\n\n- add on_trial callback whenever a trial is started (@mtyeh411, #375)\n\nBugfixes:\n\n- Allow algorithm configuration at experiment level (@007sumit, #376)\n\nMisc:\n\n- only choose override if it exists as valid alternative (@spheric, #377)\n\n## 1.4.2 (April 25th, 2016)\n\nMisc:\n\n- Deprecated some legacy methods (@andreibondarev, #374)\n\n## 1.4.1 (April 21st, 2016)\n\nBugfixes:\n\n- respect manual start configuration after an experiment has been deleted (@mtyeh411, #372)\n\nMisc:\n\n- Introduce goals collection to reduce complexity of Experiment#save (@pakallis, #365)\n- Revise specs according to http://betterspecs.org/ (@hkliya, #369)\n\n## 1.4.0 (April 2nd, 2016)\n\nFeatures:\n\n- Added experiment filters to dashboard (@ccallebs, #363, #364)\n- Added Contributor Covenant 
Code of Conduct\n\n## 1.3.2 (January 2nd, 2016)\n\nBugfixes:\n\n- Fix deleting experiments in from the updated dashboard (@craigmcnamara, #352)\n\n## 1.3.1 (January 1st, 2016)\n\nBugfixes:\n\n- Fix the dashboard for experiments with ‘/‘ in the name. (@craigmcnamara, #349)\n\n## 1.3.0 (October 20th, 2015)\n\nFeatures:\n\n - allow for custom redis_url different from ENV variable (@davidgrieser, #323)\n - add ability to change the length of the persistence cookie (@peterylai, #335)\n\nBugfixes:\n\n - Rescue from Redis::BaseError instead of Redis::CannotConnectError (@nfm, #342)\n - Fix active experiments when experiment is on a later version (@ndrisso, #331)\n - Fix caching of winning alternative (@nfm, #329)\n\nMisc:\n\n - Remove duplication from Experiment#save (@pakallis, #333)\n - Remove unnecessary argument from Experiment#write_to_alternative (@t4deu, #332)\n\n## 1.2.1 (May 17th, 2015)\n\nFeatures:\n\n - Handle redis DNS resolution failures gracefully (@fusion2004, #310)\n - Push metadata to ab_test block (@ekorneeff, #296)\n - Helper methods are now private when included in controllers (@ipoval, #303)\n\nBugfixes:\n\n - Return an empty hash as metadata when Split is disabled (@tomasdundacek, #313)\n - Don't use capture helper from ActionView (@tomasdundacek, #312)\n\nMisc:\n\n - Remove body \"max-width\" from dashboard (@xicreative, #299)\n - fix private for class methods (@ipoval, #301)\n - minor memoization fix in spec (@ipoval, #304)\n - Minor documentation fixes (#295, #297, #305, #308)\n\n## 1.2.0 (January 24th, 2015)\n\nFeatures:\n\n - Configure redis using environment variables if available (@saratovsource , #293)\n - Store metadata on experiment configuration (@dekz, #291)\n\nBugfixes:\n\n - Revert the Trial#complete! 
public API to support noargs (@dekz, #292)\n\n## 1.1.0 (January 9th, 2015)\n\nChanges:\n\n - Public class methods on `Split::Experiment` (e.g., `find_or_create`)\n have been moved to `Split::ExperimentCatalog`.\n\nFeatures:\n\n - Decouple trial from Split::Helper (@joshdover, #286)\n - Helper method for Active Experiments (@blahblahblah-, #273)\n\nMisc:\n\n - Use the new travis container based infrastructure for tests (@andrew, #280)\n\n## 1.0.0 (October 12th, 2014)\n\nChanges:\n\n - Remove support for Ruby 1.8.7 and Rails 2.3 (@qpowell, #271)\n\n## 0.8.0 (September 25th, 2014)\n\nFeatures:\n\n - Added new way to calculate the probability an alternative is the winner (@caser, #266, #251)\n - support multiple metrics per experiment (@stevenou, #260)\n\nBugfixes:\n\n - Avoiding call to params in EncapsulatedHelper (@afn, #257)\n\n## 0.7.3 (September 16th, 2014)\n\nFeatures:\n\n - Disable all split tests via a URL parameter (@hwartig, #263)\n\nBugfixes:\n\n - Correctly escape experiment names on dashboard (@ecaron, #265)\n - Handle redis connection exception error properly (@andrew, #245)\n\n## 0.7.2 (June 12th, 2014)\n\nFeatures:\n\n - Show metrics on the dashboard (@swrobel, #241)\n\nBugfixes:\n\n - Avoid nil error with ExperimentCatalog when upgrading (@danielschwartz, #253)\n - [SECURITY ISSUE] Only allow known alternatives as query param overrides (@ankane, #255)\n\n## 0.7.1 (March 20th, 2014)\n\nFeatures:\n\n - You can now reopen experiment from the dashboard (@mikezaby, #235)\n\nMisc:\n\n - Internal code tidy up (@IanVaughan, #238)\n\n## 0.7.0 (December 26th, 2013)\n\nFeatures:\n\n - Significantly improved z-score algorithm (@caser ,#221)\n - Better sorting of Experiments on dashboard (@wadako111, #218)\n\nBugfixes:\n\n - Fixed start button not being displayed in some cases (@vigosan, #219)\n\nMisc:\n\n - Experiment#initialize refactoring (@nberger, #224)\n - Extract ExperimentStore into a seperate class (@nberger, #225)\n\n## 0.6.6 (October 15th, 
2013)\n\nFeatures:\n\n - Sort experiments on Dashboard so \"active\" ones without a winner appear first (@swrobel, #204)\n - Starting tests manually (@duksis, #209)\n\nBugfixes:\n\n - Only trigger completion callback with valid Trial (@segfaultAX, #208)\n - Fixed bug with `resettable` when using `normalize_experiments` (@jonashuckestein, #213)\n\nMisc:\n\n - Added more bots to filter list (@lbeder, #214, #215, #216)\n\n## 0.6.5 (August 23, 2013)\n\nFeatures:\n\n - Added Redis adapter for persisting experiments across sessions (@fengb, #203)\n\nMisc:\n\n - Expand upon algorithms section in README (@swrobel, #200)\n\n## 0.6.4 (August 8, 2013)\n\nFeatures:\n\n - Add hooks for experiment deletion and resetting (@craigmcnamara, #198)\n - Allow Split::Helper to be used outside of a controller (@nfm, #190)\n - Show current Rails/Rack Env in dashboard (@rceee, #187)\n\nBugfixes:\n\n - Fix whiplash algorithm when using goals (@swrobel, #193)\n\nMisc:\n\n - Refactor dashboard js (@buddhamagnet)\n\n## 0.6.3 (July 8, 2013)\n\nFeatures:\n\n - Add hooks for Trial#choose! and Trial#complete! 
(@bmarini, #176)\n\nBugfixes:\n\n - Stores and parses Experiment's start_time as a UNIX integer (@joeroot, #177)\n\n## 0.6.2 (June 6, 2013)\n\nFeatures:\n\n - Rails 2.3 compatibility (@bhcarpenter, #167)\n - Adding possibility to store overridden alternative (@duksis, #173)\n\nMisc:\n\n - Now testing against multiple versions of rails\n\n## 0.6.1 (May 4, 2013)\n\nBugfixes:\n\n - Use the specified algorithm for the experiment instead of the default (@woodhull, #165)\n\nMisc:\n\n - Ensure experiements are valid when configuring (@ashmckenzie, #159)\n - Allow arrays to be passed to ab_test (@fenelon, #156)\n\n## 0.6.0 (April 4, 2013)\n\nFeatures:\n\n - Support for Ruby 2.0.0 (@phoet, #142)\n - Multiple Goals (@liujin, #109)\n - Ignoring IPs using Regular Expressions (@waynemoore, #119)\n - Added ability to add more bots to the default list (@themgt, #140)\n - Allow custom configuration of user blocking logic (@phoet , #148)\n\nBugfixes:\n\n - Fixed regression in handling of config files (@iangreenleaf, #115)\n - Fixed completion rate increases for experiments users aren't participating in (@philnash, #67)\n - Handle exceptions from invalid JSON in cookies (@iangreenleaf, #126)\n\nMisc:\n\n - updated minimum json version requirement\n - Refactor Yaml Configuration (@rtwomey, #124)\n - Refactoring of Experiments (@iangreenleaf @tamird, #117 #118)\n - Added more known Bots, including Pingdom, Bing, YandexBot (@julesie, @zinkkrysty, @dimko)\n - Improved Readme (@iangreenleaf @phoet)\n\n## 0.5.0 (January 28, 2013)\n\nFeatures:\n\n - Persistence Adapters: Cookies and Session (@patbenatar, #98)\n - Configure experiments from a hash (@iangreenleaf, #97)\n - Pluggable sampling algorithms (@woodhull, #105)\n\nBugfixes:\n\n - Fixed negative number of non-finished rates (@philnash, #83)\n - Fixed behaviour of finished(:reset => false) (@philnash, #88)\n - Only take into consideration positive z-scores (@thomasmaas, #96)\n - Amended ab_test method to raise ArgumentError if passed 
integers or symbols as\n alternatives (@buddhamagnet, #81)\n\n## 0.4.6 (October 28, 2012)\n\nFeatures:\n\n - General code quality improvements (@buddhamagnet, #79)\n\nBugfixes:\n\n - Don't increment the experiment counter if user has finished (@dimko, #78)\n - Fixed an incorrect test (@jaywengrow, #74)\n\n## 0.4.5 (August 30, 2012)\n\nBugfixes:\n\n - Fixed header gradient in FF/Opera (@philnash, #69)\n - Fixed reseting of experiment in session (@apsoto, #43)\n\n## 0.4.4 (August 9, 2012)\n\nFeatures:\n\n - Allow parameter overrides, even without Redis. (@bhcarpenter, #62)\n\nBugfixes:\n\n - Fixes version number always increasing when alternatives are changed (@philnash, #63)\n - updated guard-rspec to version 1.2\n\n## 0.4.3 (July 8, 2012)\n\nFeatures:\n\n - redis failover now recovers from all redis-related exceptions\n\n## 0.4.2 (June 1, 2012)\n\nFeatures:\n\n - Now works with v3.0 of redis gem\n\nBugfixes:\n\n - Fixed redis failover on Rubinius\n\n## 0.4.1 (April 6, 2012)\n\nFeatures:\n\n - Added configuration option to disable Split testing (@ilyakatz, #45)\n\nBugfixes:\n\n - Fix weights for existing experiments (@andreas, #40)\n - Fixed dashboard range error (@andrew, #42)\n\n## 0.4.0 (March 7, 2012)\n\n**IMPORTANT**\n\nIf using ruby 1.8.x and weighted alternatives you should always pass the control alternative through as the second argument with any other alternatives as a third argument because the order of the hash is not preserved in ruby 1.8, ruby 1.9 users are not affected by this bug.\n\nFeatures:\n\n - Experiments now record when they were started (@vrish88, #35)\n - Old versions of experiments in sessions are now cleaned up\n - Avoid users participating in multiple experiments at once (#21)\n\nBugfixes:\n\n - Overriding alternatives doesn't work for weighted alternatives (@layflags, #34)\n - confidence_level helper should handle tiny z-scores (#23)\n\n## 0.3.3 (February 16, 2012)\n\nBugfixes:\n\n - Fixed redis failover when a block was passed to 
ab_test (@layflags, #33)\n\n## 0.3.2 (February 12, 2012)\n\nFeatures:\n\n - Handle redis errors gracefully (@layflags, #32)\n\n## 0.3.1 (November 19, 2011)\n\nFeatures:\n\n - General code tidy up (@ryanlecompte, #22, @mocoso, #28)\n - Lazy loading data from Redis (@lautis, #25)\n\nBugfixes:\n\n - Handle unstarted experiments (@mocoso, #27)\n - Relaxed Sinatra version requirement (@martinclu, #24)\n\n\n## 0.3.0 (October 9, 2011)\n\nFeatures:\n\n - Redesigned dashboard (@mrappleton, #17)\n - Use atomic increments in redis for better concurrency (@lautis, #18)\n - Weighted alternatives\n\nBugfixes:\n\n - Fix to allow overriding of experiments that aren't on version 1\n\n\n## 0.2.4 (July 18, 2011)\n\nFeatures:\n\n - Added option to finished to not reset the users session\n\nBugfixes:\n\n - Only allow strings as alternatives, fixes strange errors when passing true/false or symbols\n\n## 0.2.3 (June 26, 2011)\n\nFeatures:\n\n - Experiments can now be deleted from the dashboard\n - ab_test helper now accepts a block\n - Improved dashboard\n\nBugfixes:\n\n - After resetting an experiment, existing users of that experiment will also be reset\n\n## 0.2.2 (June 11, 2011)\n\nFeatures:\n\n - Updated redis-namespace requirement to 1.0.3\n - Added a configuration object for changing options\n - Robot regex can now be changed via a configuration options\n - Added ability to ignore visits from specified IP addresses\n - Dashboard now shows percentage improvement of alternatives compared to the control\n - If the alternatives of an experiment are changed it resets the experiment and uses the new alternatives\n\nBugfixes:\n\n - Saving an experiment multiple times no longer creates duplicate alternatives\n\n## 0.2.1 (May 29, 2011)\n\nBugfixes:\n\n - Convert legacy sets to lists to avoid exceptions during upgrades from 0.1.x\n\n## 0.2.0 (May 29, 2011)\n\nFeatures:\n\n - Override an alternative via a url parameter\n - Experiments can now be reset from the dashboard\n - The first 
alternative is now considered the control\n - General dashboard usability improvements\n - Robots are ignored and given the control alternative\n\nBugfixes:\n\n - Alternatives are now store in a list rather than a set to ensure consistent ordering\n - Fixed diving by zero errors\n\n## 0.1.1 (May 18, 2011)\n\nBugfixes:\n\n - More Robust conversion rate display on dashboard\n - Ensure `Split::Version` is available everywhere, fixed dashboard\n\n## 0.1.0 (May 17, 2011)\n\nInitial Release\n\n Add Changelog\n\n @@ -1,3 +1,8 @@\n+## 3.4.1 (November 12th, 2019)\n+\n+Bugfixes:\n+- Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602)\n+\n ## 3.4.0 (November 9th, 2019)\n \n Features:\n"},"addition_count":{"kind":"number","value":5,"string":"5"},"commit_subject":{"kind":"string","value":"Add Changelog"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675151,"cells":{"id":{"kind":"string","value":"10070801"},"text":{"kind":"string","value":" CHANGELOG.md\n ## 3.4.0 (November 9th, 2019)\n\nFeatures:\n- Force experiment does not count for metrics (@andrehjr, #637)\n- Fix cleanup_old_versions! 
misbehaviour (@serggl, #661)\n\nFeatures:\n- Make goals accessible via on_trial_complete callbacks (@robin-phung, #625)\n- Replace usage of SimpleRandom with RubyStats(Used for Beta Distribution RNG) (@andrehjr, #616)\n- Introduce enable/disable experiment cohorting (@robin-phung, #615)\n- Add on_experiment_winner_choose callback (@GenaMinenkov, #574)\n- Add Split::Cache to reduce load on Redis (@rdh, #648)\n- Caching based optimization in the experiment#save path (@amangup, #652)\n- Adds config option for cookie domain (@joedelia, #664)\n\nMisc:\n- Drop support for Ruby < 2.5 (@andrehjr, #627)\n- Drop support for Rails < 5 (@andrehjr, #607)\n- Bump minimum required redis to 4.2 (@andrehjr, #628)\n- Removed repeated loading from config (@robin-phung, #619)\n- Simplify RedisInterface usage when persisting Experiment alternatives (@andrehjr, #632)\n- Remove redis_url impl. Deprecated on version 2.2 (@andrehjr, #631)\n- Remove thread_safe config as redis-rb is thread_safe by default (@andrehjr, #630)\n- Fix typo of in `Split::Trial` class variable (TomasBarry, #644)\n- Single HSET to update values, instead of multiple ones (@andrehjr, #640)\n- Use Redis#hmset to keep compatibility with Redis < 4.0 (@andrehjr, #659)\n- Remove 'set' parsing for alternatives. Sets were used as storage and deprecated on 0.x (@andrehjr, #639)\n- Adding documentation related to what is stored on cookies. (@andrehjr, #634)\n- Keep railtie defined under the Split gem namespace (@avit, #666)\n- Update RSpec helper to support block syntax (@clowder, #665)\n\n## 3.4.1 (November 12th, 2019)\n\nBugfixes:\n- Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602)\n\n## 3.4.0 (November 9th, 2019)\n\nFeatures:\n- Improve DualAdapter (@santib, #588), adds a new configuration for the DualAdapter, making it possible to keep consistency for logged_out/logged_in users. It's a opt-in flag. 
No Behavior was changed on this release.\n- Make dashboard pagination default \"per\" param configurable (@alopatin, #597)\n\nBugfixes:\n- Fix `force_alternative` for experiments with incremented version (@giraffate, #568)\n- Persist alternative weights (@giraffate, #570)\n- Combined experiment performance improvements (@gnanou, #575)\n- Handle correctly case when ab_finished is called before ab_test for a user (@gnanou, #577)\n- When loading active_experiments, it should not look into user's 'finished' keys (@andrehjr, #582)\n\nMisc:\n- Remove `rubyforge_project` from gemspec (@giraffate, #583)\n- Fix URLs to replace http with https (@giraffate , #584)\n- Lazily include split helpers in ActionController::Base (@hasghari, #586)\n- Fix unused variable warnings (@andrehjr, #592)\n- Fix ruby warnings (@andrehjr, #593)\n- Update rubocop.yml config (@andrehjr, #594)\n- Add frozen_string_literal to all files that were missing it (@andrehjr, #595)\n\n## 3.3.2 (April 12th, 2019)\n\nFeatures:\n- Added uptime robot to configuration.rb (@razel1982, #556)\n- Check to see if being run in Rails application and run in before_initialize (@husteadrobert, #555)\n\nBugfixes:\n- Fix error message interpolation (@hanibash, #553)\n- Fix Bigdecimal warnings (@agraves, #551)\n- Avoid hitting up on redis for robots/excluded users. (@andrehjr, #544)\n- Checks for defined?(request) on Helper#exclude_visitor?. 
(@andrehjr)\n\nMisc:\n- Update travis to add Rails 6 (@edmilton, #559)\n- Fix broken specs in developement environment (@dougpetronilio, #557)\n\n## 3.3.1 (January 11th, 2019)\n\nFeatures:\n- Filter some more bots (@janosch-x, #542)\n\nBugfixes:\n- Fix Dashboard Pagination Helper typo (@cattekin, #541)\n- Do not storage alternative in cookie if experiment has a winner (@sadhu89, #539)\n- fix user participating alternative not found (@NaturalHokke, #536)\n\nMisc:\n- Tweak RSpec instructions (@eliotsykes, #540)\n- Improve README regarding rspec usage (@vermaxik, #538)\n\n## 3.3.0 (August 13th, 2018)\n\nFeatures:\n\n- Added pagination for dashboard (@GeorgeGorbanev, #518)\n- Add Facebot crawler to list of bots (@pfeiffer, #530)\n- Ignore previewing requests (@pfeiffer, #531)\n- Fix binding of ignore_filter (@pfeiffer, #533)\n\nBugfixes:\n\n- Fix cookie header duplication (@andrehjr, #522)\n\nPerformance:\n\n- Improve performance of RedisInterface#make_list_length by using LTRIM command (@mlovic, #509)\n\nMisc:\n\n- Update development dependencies\n- test rails 5.2 on travis (@lostapathy, #524)\n- update ruby versions for travis (@lostapathy, #525)\n\n## 3.2.0 (September 21st, 2017)\n\nFeatures:\n\n- Allow configuration of how often winning alternatives are recalculated (@patbl, #501)\n\nBugfixes:\n\n- Avoid z_score numeric exception for conversion rates >1 (@cmantas, #503)\n- Fix combined experiments (@semanticart, #502)\n\n## 3.1.1 (August 30th, 2017)\n\nBugfixes:\n\n- Bring back support for ruby 1.9.3 and greater (rubygems 2.0.0 or greater now required) (@patbl, #498)\n\nMisc:\n\n- Document testing with RSpec (@eliotsykes, #495)\n\n## 3.1.0 (August 14th, 2017)\n\nFeatures:\n\n- Support for combined experiments (@daviddening, #493)\n- Rewrite CookieAdapter to work with Rack::Request and Rack::Response directly (@andrehjr, #490)\n- Enumeration of a User's Experiments that Respects the db_failover Option(@MarkRoddy, #487)\n\nBugfixes:\n\n- Blocked a few more common bot 
user agents (@kylerippey, #485)\n\nMisc:\n\n- Repository Audit by Maintainer.io (@RichardLitt, #484)\n- Update development dependencies\n- Test on ruby 2.4.1\n- Test compatibility with rails 5.1\n- Add uris to metadata section in gemspec\n\n## 3.0.0 (March 30th, 2017)\n\nFeatures:\n\n- added block randomization algorithm and specs (@hulleywood, #475)\n- Add ab_record_extra_info to allow record extra info to alternative and display on dashboard. (@tranngocsam, #460)\n\nBugfixes:\n\n- Avoid crashing on Ruby 2.4 for numeric strings (@flori, #470)\n- Fix issue where redis isn't required (@tomciopp , #466)\n\nMisc:\n\n- Avoid variable_size_secure_compare private method (@eliotsykes, #465)\n\n## 2.2.0 (November 11th, 2016)\n\n**Backwards incompatible!** Redis keys are renamed. Please make sure all running tests are completed before you upgrade, as they will reset.\n\nFeatures:\n\n- Remove dependency on Redis::Namespace (@bschaeffer, #425)\n- Make resetting on experiment change optional (@moggyboy, #430)\n- Add ability to force alternative on dashboard (@ccallebs, #437)\n\nBugfixes:\n\n- Fix variations reset across page loads for multiple=control and improve coverage (@Vasfed, #432)\n\nMisc:\n\n- Remove Explicit Return (@BradHudson, #441)\n- Update Redis config docs (@bschaeffer, #422)\n- Harden HTTP Basic snippet against timing attacks (@eliotsykes, #443)\n- Removed a couple old ruby 1.8 hacks (@andrew, #456)\n- Run tests on rails 5 (@andrew, #457)\n- Fixed a few codeclimate warnings (@andrew, #458)\n- Use codeclimate for test coverage (@andrew #455)\n\n## 2.1.0 (August 8th, 2016)\n\nFeatures:\n\n- Support REDIS_PROVIDER variable used in Heroku (@kartikluke, #426)\n\n## 2.0.0 (July 17th, 2016)\n\nBreaking changes:\n\n- Removed deprecated `finished` and `begin_experiment` methods\n- Namespaced override param to avoid potential clashes (@henrik, #398)\n\n## 1.7.0 (June 28th, 2016)\n\nFeatures:\n\n- Running concurrent experiments on same endpoint/view (@karmakaze, 
#421)\n\n## 1.6.0 (June 16th, 2016)\n\nFeatures:\n\n- Add Dual Redis(logged-in)/cookie(logged-out) persistence adapter (@karmakaze, #420)\n\n## 1.5.0 (June 8th, 2016)\n\nFeatures:\n\n- Add `expire_seconds:` TTL option to RedisAdapter (@karmakaze, #409)\n- Optional custom persistence adapter (@ndelage, #411)\n\nMisc:\n\n- Use fakeredis for testing (@andrew, #412)\n\n## 1.4.5 (June 7th, 2016)\n\nBugfixes:\n\n- FIX Negative numbers on non-finished (@divineforest, #408)\n- Eliminate extra RedisAdapter hget (@karmakaze, #407)\n- Remove unecessary code from Experiment class (@pakallis, #391, #392, #393)\n\nMisc:\n\n- Simplify Configuration#normalized_experiments (@pakallis, #395)\n- Clarify test running instructions (@henrik, #397)\n\n## 1.4.4 (May 9th, 2016)\n\nBugfixes:\n\n- Increment participation if store override is true and no experiment key exists (@spheric, #380)\n\nMisc:\n\n- Deprecated `finished` method in favour of `ab_finished` (@andreibondarev, #389)\n- Added minimum version requirement to simple-random\n- Clarify finished with first option being a hash in Readme (@henrik, #382)\n- Refactoring the User abstraction (@andreibondarev, #384)\n\n## 1.4.3 (April 28th, 2016)\n\nFeatures:\n\n- add on_trial callback whenever a trial is started (@mtyeh411, #375)\n\nBugfixes:\n\n- Allow algorithm configuration at experiment level (@007sumit, #376)\n\nMisc:\n\n- only choose override if it exists as valid alternative (@spheric, #377)\n\n## 1.4.2 (April 25th, 2016)\n\nMisc:\n\n- Deprecated some legacy methods (@andreibondarev, #374)\n\n## 1.4.1 (April 21st, 2016)\n\nBugfixes:\n\n- respect manual start configuration after an experiment has been deleted (@mtyeh411, #372)\n\nMisc:\n\n- Introduce goals collection to reduce complexity of Experiment#save (@pakallis, #365)\n- Revise specs according to http://betterspecs.org/ (@hkliya, #369)\n\n## 1.4.0 (April 2nd, 2016)\n\nFeatures:\n\n- Added experiment filters to dashboard (@ccallebs, #363, #364)\n- Added Contributor Covenant 
Code of Conduct\n\n## 1.3.2 (January 2nd, 2016)\n\nBugfixes:\n\n- Fix deleting experiments in from the updated dashboard (@craigmcnamara, #352)\n\n## 1.3.1 (January 1st, 2016)\n\nBugfixes:\n\n- Fix the dashboard for experiments with ‘/‘ in the name. (@craigmcnamara, #349)\n\n## 1.3.0 (October 20th, 2015)\n\nFeatures:\n\n - allow for custom redis_url different from ENV variable (@davidgrieser, #323)\n - add ability to change the length of the persistence cookie (@peterylai, #335)\n\nBugfixes:\n\n - Rescue from Redis::BaseError instead of Redis::CannotConnectError (@nfm, #342)\n - Fix active experiments when experiment is on a later version (@ndrisso, #331)\n - Fix caching of winning alternative (@nfm, #329)\n\nMisc:\n\n - Remove duplication from Experiment#save (@pakallis, #333)\n - Remove unnecessary argument from Experiment#write_to_alternative (@t4deu, #332)\n\n## 1.2.1 (May 17th, 2015)\n\nFeatures:\n\n - Handle redis DNS resolution failures gracefully (@fusion2004, #310)\n - Push metadata to ab_test block (@ekorneeff, #296)\n - Helper methods are now private when included in controllers (@ipoval, #303)\n\nBugfixes:\n\n - Return an empty hash as metadata when Split is disabled (@tomasdundacek, #313)\n - Don't use capture helper from ActionView (@tomasdundacek, #312)\n\nMisc:\n\n - Remove body \"max-width\" from dashboard (@xicreative, #299)\n - fix private for class methods (@ipoval, #301)\n - minor memoization fix in spec (@ipoval, #304)\n - Minor documentation fixes (#295, #297, #305, #308)\n\n## 1.2.0 (January 24th, 2015)\n\nFeatures:\n\n - Configure redis using environment variables if available (@saratovsource , #293)\n - Store metadata on experiment configuration (@dekz, #291)\n\nBugfixes:\n\n - Revert the Trial#complete! 
public API to support noargs (@dekz, #292)\n\n## 1.1.0 (January 9th, 2015)\n\nChanges:\n\n - Public class methods on `Split::Experiment` (e.g., `find_or_create`)\n have been moved to `Split::ExperimentCatalog`.\n\nFeatures:\n\n - Decouple trial from Split::Helper (@joshdover, #286)\n - Helper method for Active Experiments (@blahblahblah-, #273)\n\nMisc:\n\n - Use the new travis container based infrastructure for tests (@andrew, #280)\n\n## 1.0.0 (October 12th, 2014)\n\nChanges:\n\n - Remove support for Ruby 1.8.7 and Rails 2.3 (@qpowell, #271)\n\n## 0.8.0 (September 25th, 2014)\n\nFeatures:\n\n - Added new way to calculate the probability an alternative is the winner (@caser, #266, #251)\n - support multiple metrics per experiment (@stevenou, #260)\n\nBugfixes:\n\n - Avoiding call to params in EncapsulatedHelper (@afn, #257)\n\n## 0.7.3 (September 16th, 2014)\n\nFeatures:\n\n - Disable all split tests via a URL parameter (@hwartig, #263)\n\nBugfixes:\n\n - Correctly escape experiment names on dashboard (@ecaron, #265)\n - Handle redis connection exception error properly (@andrew, #245)\n\n## 0.7.2 (June 12th, 2014)\n\nFeatures:\n\n - Show metrics on the dashboard (@swrobel, #241)\n\nBugfixes:\n\n - Avoid nil error with ExperimentCatalog when upgrading (@danielschwartz, #253)\n - [SECURITY ISSUE] Only allow known alternatives as query param overrides (@ankane, #255)\n\n## 0.7.1 (March 20th, 2014)\n\nFeatures:\n\n - You can now reopen experiment from the dashboard (@mikezaby, #235)\n\nMisc:\n\n - Internal code tidy up (@IanVaughan, #238)\n\n## 0.7.0 (December 26th, 2013)\n\nFeatures:\n\n - Significantly improved z-score algorithm (@caser ,#221)\n - Better sorting of Experiments on dashboard (@wadako111, #218)\n\nBugfixes:\n\n - Fixed start button not being displayed in some cases (@vigosan, #219)\n\nMisc:\n\n - Experiment#initialize refactoring (@nberger, #224)\n - Extract ExperimentStore into a seperate class (@nberger, #225)\n\n## 0.6.6 (October 15th, 
2013)\n\nFeatures:\n\n - Sort experiments on Dashboard so \"active\" ones without a winner appear first (@swrobel, #204)\n - Starting tests manually (@duksis, #209)\n\nBugfixes:\n\n - Only trigger completion callback with valid Trial (@segfaultAX, #208)\n - Fixed bug with `resettable` when using `normalize_experiments` (@jonashuckestein, #213)\n\nMisc:\n\n - Added more bots to filter list (@lbeder, #214, #215, #216)\n\n## 0.6.5 (August 23, 2013)\n\nFeatures:\n\n - Added Redis adapter for persisting experiments across sessions (@fengb, #203)\n\nMisc:\n\n - Expand upon algorithms section in README (@swrobel, #200)\n\n## 0.6.4 (August 8, 2013)\n\nFeatures:\n\n - Add hooks for experiment deletion and resetting (@craigmcnamara, #198)\n - Allow Split::Helper to be used outside of a controller (@nfm, #190)\n - Show current Rails/Rack Env in dashboard (@rceee, #187)\n\nBugfixes:\n\n - Fix whiplash algorithm when using goals (@swrobel, #193)\n\nMisc:\n\n - Refactor dashboard js (@buddhamagnet)\n\n## 0.6.3 (July 8, 2013)\n\nFeatures:\n\n - Add hooks for Trial#choose! and Trial#complete! 
(@bmarini, #176)\n\nBugfixes:\n\n - Stores and parses Experiment's start_time as a UNIX integer (@joeroot, #177)\n\n## 0.6.2 (June 6, 2013)\n\nFeatures:\n\n - Rails 2.3 compatibility (@bhcarpenter, #167)\n - Adding possibility to store overridden alternative (@duksis, #173)\n\nMisc:\n\n - Now testing against multiple versions of rails\n\n## 0.6.1 (May 4, 2013)\n\nBugfixes:\n\n - Use the specified algorithm for the experiment instead of the default (@woodhull, #165)\n\nMisc:\n\n - Ensure experiements are valid when configuring (@ashmckenzie, #159)\n - Allow arrays to be passed to ab_test (@fenelon, #156)\n\n## 0.6.0 (April 4, 2013)\n\nFeatures:\n\n - Support for Ruby 2.0.0 (@phoet, #142)\n - Multiple Goals (@liujin, #109)\n - Ignoring IPs using Regular Expressions (@waynemoore, #119)\n - Added ability to add more bots to the default list (@themgt, #140)\n - Allow custom configuration of user blocking logic (@phoet , #148)\n\nBugfixes:\n\n - Fixed regression in handling of config files (@iangreenleaf, #115)\n - Fixed completion rate increases for experiments users aren't participating in (@philnash, #67)\n - Handle exceptions from invalid JSON in cookies (@iangreenleaf, #126)\n\nMisc:\n\n - updated minimum json version requirement\n - Refactor Yaml Configuration (@rtwomey, #124)\n - Refactoring of Experiments (@iangreenleaf @tamird, #117 #118)\n - Added more known Bots, including Pingdom, Bing, YandexBot (@julesie, @zinkkrysty, @dimko)\n - Improved Readme (@iangreenleaf @phoet)\n\n## 0.5.0 (January 28, 2013)\n\nFeatures:\n\n - Persistence Adapters: Cookies and Session (@patbenatar, #98)\n - Configure experiments from a hash (@iangreenleaf, #97)\n - Pluggable sampling algorithms (@woodhull, #105)\n\nBugfixes:\n\n - Fixed negative number of non-finished rates (@philnash, #83)\n - Fixed behaviour of finished(:reset => false) (@philnash, #88)\n - Only take into consideration positive z-scores (@thomasmaas, #96)\n - Amended ab_test method to raise ArgumentError if passed 
integers or symbols as\n alternatives (@buddhamagnet, #81)\n\n## 0.4.6 (October 28, 2012)\n\nFeatures:\n\n - General code quality improvements (@buddhamagnet, #79)\n\nBugfixes:\n\n - Don't increment the experiment counter if user has finished (@dimko, #78)\n - Fixed an incorrect test (@jaywengrow, #74)\n\n## 0.4.5 (August 30, 2012)\n\nBugfixes:\n\n - Fixed header gradient in FF/Opera (@philnash, #69)\n - Fixed reseting of experiment in session (@apsoto, #43)\n\n## 0.4.4 (August 9, 2012)\n\nFeatures:\n\n - Allow parameter overrides, even without Redis. (@bhcarpenter, #62)\n\nBugfixes:\n\n - Fixes version number always increasing when alternatives are changed (@philnash, #63)\n - updated guard-rspec to version 1.2\n\n## 0.4.3 (July 8, 2012)\n\nFeatures:\n\n - redis failover now recovers from all redis-related exceptions\n\n## 0.4.2 (June 1, 2012)\n\nFeatures:\n\n - Now works with v3.0 of redis gem\n\nBugfixes:\n\n - Fixed redis failover on Rubinius\n\n## 0.4.1 (April 6, 2012)\n\nFeatures:\n\n - Added configuration option to disable Split testing (@ilyakatz, #45)\n\nBugfixes:\n\n - Fix weights for existing experiments (@andreas, #40)\n - Fixed dashboard range error (@andrew, #42)\n\n## 0.4.0 (March 7, 2012)\n\n**IMPORTANT**\n\nIf using ruby 1.8.x and weighted alternatives you should always pass the control alternative through as the second argument with any other alternatives as a third argument because the order of the hash is not preserved in ruby 1.8, ruby 1.9 users are not affected by this bug.\n\nFeatures:\n\n - Experiments now record when they were started (@vrish88, #35)\n - Old versions of experiments in sessions are now cleaned up\n - Avoid users participating in multiple experiments at once (#21)\n\nBugfixes:\n\n - Overriding alternatives doesn't work for weighted alternatives (@layflags, #34)\n - confidence_level helper should handle tiny z-scores (#23)\n\n## 0.3.3 (February 16, 2012)\n\nBugfixes:\n\n - Fixed redis failover when a block was passed to 
ab_test (@layflags, #33)\n\n## 0.3.2 (February 12, 2012)\n\nFeatures:\n\n - Handle redis errors gracefully (@layflags, #32)\n\n## 0.3.1 (November 19, 2011)\n\nFeatures:\n\n - General code tidy up (@ryanlecompte, #22, @mocoso, #28)\n - Lazy loading data from Redis (@lautis, #25)\n\nBugfixes:\n\n - Handle unstarted experiments (@mocoso, #27)\n - Relaxed Sinatra version requirement (@martinclu, #24)\n\n\n## 0.3.0 (October 9, 2011)\n\nFeatures:\n\n - Redesigned dashboard (@mrappleton, #17)\n - Use atomic increments in redis for better concurrency (@lautis, #18)\n - Weighted alternatives\n\nBugfixes:\n\n - Fix to allow overriding of experiments that aren't on version 1\n\n\n## 0.2.4 (July 18, 2011)\n\nFeatures:\n\n - Added option to finished to not reset the users session\n\nBugfixes:\n\n - Only allow strings as alternatives, fixes strange errors when passing true/false or symbols\n\n## 0.2.3 (June 26, 2011)\n\nFeatures:\n\n - Experiments can now be deleted from the dashboard\n - ab_test helper now accepts a block\n - Improved dashboard\n\nBugfixes:\n\n - After resetting an experiment, existing users of that experiment will also be reset\n\n## 0.2.2 (June 11, 2011)\n\nFeatures:\n\n - Updated redis-namespace requirement to 1.0.3\n - Added a configuration object for changing options\n - Robot regex can now be changed via a configuration options\n - Added ability to ignore visits from specified IP addresses\n - Dashboard now shows percentage improvement of alternatives compared to the control\n - If the alternatives of an experiment are changed it resets the experiment and uses the new alternatives\n\nBugfixes:\n\n - Saving an experiment multiple times no longer creates duplicate alternatives\n\n## 0.2.1 (May 29, 2011)\n\nBugfixes:\n\n - Convert legacy sets to lists to avoid exceptions during upgrades from 0.1.x\n\n## 0.2.0 (May 29, 2011)\n\nFeatures:\n\n - Override an alternative via a url parameter\n - Experiments can now be reset from the dashboard\n - The first 
alternative is now considered the control\n - General dashboard usability improvements\n - Robots are ignored and given the control alternative\n\nBugfixes:\n\n - Alternatives are now store in a list rather than a set to ensure consistent ordering\n - Fixed diving by zero errors\n\n## 0.1.1 (May 18, 2011)\n\nBugfixes:\n\n - More Robust conversion rate display on dashboard\n - Ensure `Split::Version` is available everywhere, fixed dashboard\n\n## 0.1.0 (May 17, 2011)\n\nInitial Release\n\n Add Changelog\n\n @@ -1,3 +1,8 @@\n+## 3.4.1 (November 12th, 2019)\n+\n+Bugfixes:\n+- Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602)\n+\n ## 3.4.0 (November 9th, 2019)\n \n Features:\n"},"addition_count":{"kind":"number","value":5,"string":"5"},"commit_subject":{"kind":"string","value":"Add Changelog"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675152,"cells":{"id":{"kind":"string","value":"10070802"},"text":{"kind":"string","value":" .gitignore\n build\nnode_modules\n\n ignore foo.js\n\n @@ -1,2 +1,3 @@\n build\n node_modules\n+foo.js\n\\ No newline at end of file\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"ignore foo.js"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":""},"lang":{"kind":"string","value":"gitignore"},"license":{"kind":"string","value":"bsd-2-clause"},"repo_name":{"kind":"string","value":"jlongster/transducers.js"}}},{"rowIdx":10675153,"cells":{"id":{"kind":"string","value":"10070803"},"text":{"kind":"string","value":" split.gemspec\n # -*- encoding: utf-8 -*-\n# frozen_string_literal: true\n\n$:.push File.expand_path(\"../lib\", __FILE__)\nrequire 
\"split/version\"\n\nGem::Specification.new do |s|\n s.name = \"split\"\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.licenses = [\"MIT\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = \"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => \"https://github.com/splitrb/split\",\n \"changelog_uri\" => \"https://github.com/splitrb/split/blob/main/CHANGELOG.md\",\n \"source_code_uri\" => \"https://github.com/splitrb/split\",\n s.require_paths = [\"lib\"]\n\n s.add_dependency 'redis', '>= 2.1'\n s.add_dependency 'redis-namespace', '>= 1.1.0'\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'simple-random', '>= 0.9.3'\n\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency \"redis\", \">= 4.2\"\n s.add_dependency \"sinatra\", \">= 1.2.6\"\n s.add_dependency \"rubystats\", \">= 0.3.0\"\n\n s.add_development_dependency \"bundler\", \">= 1.17\"\n s.add_development_dependency \"simplecov\", \"~> 0.15\"\n s.add_development_dependency \"rack-test\", \"~> 2.0\"\n s.add_development_dependency \"rake\", \"~> 13\"\n s.add_development_dependency \"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n Remove dependency on Redis::Namespace (#425)\n\n* Rename redis_url config to redis\r\n\r\n* Remove dependency on redis-namespace\r\n\r\n* Add backwards compatible redis_url with deprecation\r\n\n @@ -21,7 +21,6 @@ Gem::Specification.new do |s|\n s.require_paths = [\"lib\"]\n \n s.add_dependency 'redis', '>= 2.1'\n- s.add_dependency 'redis-namespace', '>= 1.1.0'\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'simple-random', '>= 0.9.3'\n 
\n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Remove dependency on Redis::Namespace (#425)"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675154,"cells":{"id":{"kind":"string","value":"10070804"},"text":{"kind":"string","value":" split.gemspec\n # -*- encoding: utf-8 -*-\n# frozen_string_literal: true\n\n$:.push File.expand_path(\"../lib\", __FILE__)\nrequire \"split/version\"\n\nGem::Specification.new do |s|\n s.name = \"split\"\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.licenses = [\"MIT\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = \"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => \"https://github.com/splitrb/split\",\n \"changelog_uri\" => \"https://github.com/splitrb/split/blob/main/CHANGELOG.md\",\n \"source_code_uri\" => \"https://github.com/splitrb/split\",\n s.require_paths = [\"lib\"]\n\n s.add_dependency 'redis', '>= 2.1'\n s.add_dependency 'redis-namespace', '>= 1.1.0'\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'simple-random', '>= 0.9.3'\n\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency \"redis\", \">= 4.2\"\n s.add_dependency \"sinatra\", \">= 1.2.6\"\n s.add_dependency \"rubystats\", \">= 0.3.0\"\n\n s.add_development_dependency \"bundler\", \">= 1.17\"\n s.add_development_dependency \"simplecov\", \"~> 0.15\"\n s.add_development_dependency \"rack-test\", \"~> 2.0\"\n s.add_development_dependency \"rake\", \"~> 13\"\n s.add_development_dependency 
\"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n Remove dependency on Redis::Namespace (#425)\n\n* Rename redis_url config to redis\r\n\r\n* Remove dependency on redis-namespace\r\n\r\n* Add backwards compatible redis_url with deprecation\r\n\n @@ -21,7 +21,6 @@ Gem::Specification.new do |s|\n s.require_paths = [\"lib\"]\n \n s.add_dependency 'redis', '>= 2.1'\n- s.add_dependency 'redis-namespace', '>= 1.1.0'\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'simple-random', '>= 0.9.3'\n \n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Remove dependency on Redis::Namespace (#425)"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675155,"cells":{"id":{"kind":"string","value":"10070805"},"text":{"kind":"string","value":" split.gemspec\n # -*- encoding: utf-8 -*-\n# frozen_string_literal: true\n\n$:.push File.expand_path(\"../lib\", __FILE__)\nrequire \"split/version\"\n\nGem::Specification.new do |s|\n s.name = \"split\"\n s.version = Split::VERSION\n s.platform = Gem::Platform::RUBY\n s.authors = [\"Andrew Nesbitt\"]\n s.licenses = [\"MIT\"]\n s.email = [\"andrewnez@gmail.com\"]\n s.homepage = \"https://github.com/splitrb/split\"\n s.summary = \"Rack based split testing framework\"\n\n s.metadata = {\n \"homepage_uri\" => \"https://github.com/splitrb/split\",\n \"changelog_uri\" => \"https://github.com/splitrb/split/blob/main/CHANGELOG.md\",\n \"source_code_uri\" => \"https://github.com/splitrb/split\",\n s.require_paths = [\"lib\"]\n\n s.add_dependency 'redis', '>= 2.1'\n s.add_dependency 'redis-namespace', '>= 1.1.0'\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'simple-random', '>= 
0.9.3'\n\n s.required_rubygems_version = \">= 2.0.0\"\n\n s.files = `git ls-files`.split(\"\\n\")\n s.test_files = `git ls-files -- {test,spec,features}/*`.split(\"\\n\")\n s.require_paths = [\"lib\"]\n\n s.add_dependency \"redis\", \">= 4.2\"\n s.add_dependency \"sinatra\", \">= 1.2.6\"\n s.add_dependency \"rubystats\", \">= 0.3.0\"\n\n s.add_development_dependency \"bundler\", \">= 1.17\"\n s.add_development_dependency \"simplecov\", \"~> 0.15\"\n s.add_development_dependency \"rack-test\", \"~> 2.0\"\n s.add_development_dependency \"rake\", \"~> 13\"\n s.add_development_dependency \"rspec\", \"~> 3.7\"\n s.add_development_dependency \"pry\", \"~> 0.10\"\n s.add_development_dependency \"rails\", \">= 5.0\"\nend\n\n Remove dependency on Redis::Namespace (#425)\n\n* Rename redis_url config to redis\r\n\r\n* Remove dependency on redis-namespace\r\n\r\n* Add backwards compatible redis_url with deprecation\r\n\n @@ -21,7 +21,6 @@ Gem::Specification.new do |s|\n s.require_paths = [\"lib\"]\n \n s.add_dependency 'redis', '>= 2.1'\n- s.add_dependency 'redis-namespace', '>= 1.1.0'\n s.add_dependency 'sinatra', '>= 1.2.6'\n s.add_dependency 'simple-random', '>= 0.9.3'\n \n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Remove dependency on Redis::Namespace (#425)"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".gemspec"},"lang":{"kind":"string","value":"gemspec"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675156,"cells":{"id":{"kind":"string","value":"10070806"},"text":{"kind":"string","value":" configuration_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\n\ndescribe Split::Configuration do\n before(:each) { @config = Split::Configuration.new }\n\n it \"should provide a default value for ignore_ip_addresses\" do\n expect(@config.ignore_ip_addresses).to eq([])\n end\n\n it 
\"should provide default values for db failover\" do\n expect(@config.db_failover).to be_falsey\n expect(@config.db_failover_on_db_error).to be_a Proc\n end\n\n it \"should not allow multiple experiments by default\" do\n expect(@config.allow_multiple_experiments).to be_falsey\n end\n\n it \"should be enabled by default\" do\n expect(@config.enabled).to be_truthy\n end\n\n it \"disabled is the opposite of enabled\" do\n @config.enabled = false\n expect(@config.disabled?).to be_truthy\n end\n\n it \"should not store the overridden test group per default\" do\n expect(@config.store_override).to be_falsey\n end\n\n it \"should provide a default pattern for robots\" do\n %w[Baidu Gigabot Googlebot libwww-perl lwp-trivial msnbot SiteUptime Slurp WordPress ZIBB ZyBorg YandexBot AdsBot-Google Wget curl bitlybot facebookexternalhit spider].each do |robot|\n expect(@config.robot_regex).to match(robot)\n end\n\n expect(@config.robot_regex).to match(\"EventMachine HttpClient\")\n expect(@config.robot_regex).to match(\"libwww-perl/5.836\")\n expect(@config.robot_regex).to match(\"Pingdom.com_bot_version_1.4_(http://www.pingdom.com)\")\n\n expect(@config.robot_regex).to match(\" - \")\n end\n\n it \"should accept real UAs with the robot regexp\" do\n expect(@config.robot_regex).not_to match(\"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0\")\n expect(@config.robot_regex).not_to match(\"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; F-6.0SP2-20041109; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; .NET CLR 1.1.4322; InfoPath.3)\")\n end\n\n it \"should allow adding a bot to the bot list\" do\n @config.bots[\"newbot\"] = \"An amazing test bot\"\n expect(@config.robot_regex).to match(\"newbot\")\n end\n\n it \"should use the session adapter for persistence by default\" do\n expect(@config.persistence).to eq(Split::Persistence::SessionAdapter)\n end\n\n it \"should load a metric\" do\n @config.experiments = 
{ my_experiment: { alternatives: [\"control_opt\", \"other_opt\"], metric: :my_metric } }\n\n expect(@config.metrics).not_to be_nil\n expect(@config.metrics.keys).to eq([:my_metric])\n end\n\n it \"should allow loading of experiment using experment_for\" do\n @config.experiments = { my_experiment: { alternatives: [\"control_opt\", \"other_opt\"], metric: :my_metric } }\n expect(@config.experiment_for(:my_experiment)).to eq({ alternatives: [\"control_opt\", [\"other_opt\"]] })\n end\n\n context \"when experiments are defined via YAML\" do\n context \"as strings\" do\n context \"in a basic configuration\" do\n before do\n experiments_yaml = <<-eos\n my_experiment:\n alternatives:\n - Control Opt\n - Alt One\n - Alt Two\n resettable: false\n eos\n @config.experiments = YAML.load(experiments_yaml)\n end\n\n it \"should normalize experiments\" do\n expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [\"Control Opt\", [\"Alt One\", \"Alt Two\"]] } })\n end\n end\n\n context \"in a configuration with metadata\" do\n before do\n experiments_yaml = <<-eos\n my_experiment:\n alternatives:\n - name: Control Opt\n percent: 67\n - name: Alt One\n percent: 10\n - name: Alt Two\n percent: 23\n metadata:\n Control Opt:\n text: 'Control Option'\n Alt One:\n text: 'Alternative One'\n Alt Two:\n text: 'Alternative Two'\n resettable: false\n eos\n @config.experiments = YAML.load(experiments_yaml)\n end\n\n it \"should have metadata on the experiment\" do\n meta = @config.normalized_experiments[:my_experiment][:metadata]\n expect(meta).to_not be nil\n expect(meta[\"Control Opt\"][\"text\"]).to eq(\"Control Option\")\n end\n end\n\n context \"in a complex configuration\" do\n before do\n experiments_yaml = <<-eos\n my_experiment:\n alternatives:\n - name: Control Opt\n percent: 67\n - name: Alt One\n percent: 10\n - name: Alt Two\n percent: 23\n resettable: false\n metric: my_metric\n another_experiment:\n alternatives:\n - a\n - b\n eos\n 
@config.experiments = YAML.load(experiments_yaml)\n end\n\n it \"should normalize experiments\" do\n expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [{ \"Control Opt\"=>0.67 },\n [{ \"Alt One\"=>0.1 }, { \"Alt Two\"=>0.23 }]] }, another_experiment: { alternatives: [\"a\", [\"b\"]] } })\n end\n\n it \"should recognize metrics\" do\n expect(@config.metrics).not_to be_nil\n expect(@config.metrics.keys).to eq([:my_metric])\n end\n end\n end\n\n context \"as symbols\" do\n context \"with valid YAML\" do\n before do\n experiments_yaml = <<-eos\n :my_experiment:\n :alternatives:\n - Control Opt\n - Alt One\n - Alt Two\n :resettable: false\n eos\n @config.experiments = YAML.load(experiments_yaml)\n end\n\n it \"should normalize experiments\" do\n expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [\"Control Opt\", [\"Alt One\", \"Alt Two\"]] } })\n end\n end\n\n context \"with invalid YAML\" do\n let(:yaml) { YAML.load(input) }\n\n context \"with an empty string\" do\n let(:input) { \"\" }\n\n it \"should raise an error\" do\n expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n end\n\n context \"with just the YAML header\" do\n let(:input) { \"---\" }\n\n it \"should raise an error\" do\n expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n end\n end\n end\n end\n\n it \"should normalize experiments\" do\n @config.experiments = {\n my_experiment: {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n }\n\n expect(@config.normalized_experiments).to eq({ my_experiment: { alternatives: [{ \"control_opt\"=>0.67 }, [{ \"second_opt\"=>0.1 }, { \"third_opt\"=>0.23 }]] } })\n end\n\n context \"redis configuration\" do\n it \"should default to local redis server\" do\n old_redis_url = 
ENV[\"REDIS_URL\"]\n ENV.delete(\"REDIS_URL\")\n\n context \"redis configuration\" do\n it \"should default to local redis server\" do\n expect(@config.redis).to eq(\"redis://localhost:6379\")\n end\n\n it \"should allow for redis url to be configured\" do\n it \"should use the ENV variable\" do\n old_redis_url = ENV[\"REDIS_URL\"]\n ENV[\"REDIS_URL\"] = \"env_redis_url\"\n\n context \"provided REDIS_URL environment variable\" do\n it \"should use the ENV variable\" do\n ENV['REDIS_URL'] = \"env_redis_url\"\n expect(Split::Configuration.new.redis).to eq(\"env_redis_url\")\n ENV.delete('REDIS_URL')\n end\n end\n end\n\n it \"should allow the persistence cookie length to be configured\" do\n @config.persistence_cookie_length = 2592000\n expect(@config.persistence_cookie_length).to eq(2592000)\n end\n end\n\n context \"persistence cookie domain\" do\n it \"should default to nil\" do\n expect(@config.persistence_cookie_domain).to eq(nil)\n end\n\n it \"should allow the persistence cookie domain to be configured\" do\n @config.persistence_cookie_domain = \".acme.com\"\n expect(@config.persistence_cookie_domain).to eq(\".acme.com\")\n end\n end\nend\n\n Merge pull request #662 from splitrb/gh-actions\n\nMoving to Github Actions for CI\n @@ -214,7 +214,10 @@ describe Split::Configuration do\n \n context \"redis configuration\" do\n it \"should default to local redis server\" do\n- expect(@config.redis).to eq(\"redis://localhost:6379\")\n+ old_redis_url = ENV['REDIS_URL']\n+ ENV.delete('REDIS_URL')\n+ expect(Split::Configuration.new.redis).to eq(\"redis://localhost:6379\")\n+ ENV['REDIS_URL'] = old_redis_url\n end\n \n it \"should allow for redis url to be configured\" do\n@@ -224,9 +227,10 @@ describe Split::Configuration do\n \n context \"provided REDIS_URL environment variable\" do\n it \"should use the ENV variable\" do\n+ old_redis_url = ENV['REDIS_URL']\n ENV['REDIS_URL'] = \"env_redis_url\"\n expect(Split::Configuration.new.redis).to eq(\"env_redis_url\")\n- 
ENV.delete('REDIS_URL')\n+ ENV['REDIS_URL'] = old_redis_url\n end\n end\n end\n"},"addition_count":{"kind":"number","value":6,"string":"6"},"commit_subject":{"kind":"string","value":"Merge pull request #662 from splitrb/gh-actions"},"deletion_count":{"kind":"number","value":2,"string":"2"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675157,"cells":{"id":{"kind":"string","value":"10070807"},"text":{"kind":"string","value":" configuration_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\n\ndescribe Split::Configuration do\n before(:each) { @config = Split::Configuration.new }\n\n it \"should provide a default value for ignore_ip_addresses\" do\n expect(@config.ignore_ip_addresses).to eq([])\n end\n\n it \"should provide default values for db failover\" do\n expect(@config.db_failover).to be_falsey\n expect(@config.db_failover_on_db_error).to be_a Proc\n end\n\n it \"should not allow multiple experiments by default\" do\n expect(@config.allow_multiple_experiments).to be_falsey\n end\n\n it \"should be enabled by default\" do\n expect(@config.enabled).to be_truthy\n end\n\n it \"disabled is the opposite of enabled\" do\n @config.enabled = false\n expect(@config.disabled?).to be_truthy\n end\n\n it \"should not store the overridden test group per default\" do\n expect(@config.store_override).to be_falsey\n end\n\n it \"should provide a default pattern for robots\" do\n %w[Baidu Gigabot Googlebot libwww-perl lwp-trivial msnbot SiteUptime Slurp WordPress ZIBB ZyBorg YandexBot AdsBot-Google Wget curl bitlybot facebookexternalhit spider].each do |robot|\n expect(@config.robot_regex).to match(robot)\n end\n\n expect(@config.robot_regex).to match(\"EventMachine HttpClient\")\n expect(@config.robot_regex).to match(\"libwww-perl/5.836\")\n expect(@config.robot_regex).to 
match(\"Pingdom.com_bot_version_1.4_(http://www.pingdom.com)\")\n\n expect(@config.robot_regex).to match(\" - \")\n end\n\n it \"should accept real UAs with the robot regexp\" do\n expect(@config.robot_regex).not_to match(\"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0\")\n expect(@config.robot_regex).not_to match(\"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; F-6.0SP2-20041109; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; .NET CLR 1.1.4322; InfoPath.3)\")\n end\n\n it \"should allow adding a bot to the bot list\" do\n @config.bots[\"newbot\"] = \"An amazing test bot\"\n expect(@config.robot_regex).to match(\"newbot\")\n end\n\n it \"should use the session adapter for persistence by default\" do\n expect(@config.persistence).to eq(Split::Persistence::SessionAdapter)\n end\n\n it \"should load a metric\" do\n @config.experiments = { my_experiment: { alternatives: [\"control_opt\", \"other_opt\"], metric: :my_metric } }\n\n expect(@config.metrics).not_to be_nil\n expect(@config.metrics.keys).to eq([:my_metric])\n end\n\n it \"should allow loading of experiment using experment_for\" do\n @config.experiments = { my_experiment: { alternatives: [\"control_opt\", \"other_opt\"], metric: :my_metric } }\n expect(@config.experiment_for(:my_experiment)).to eq({ alternatives: [\"control_opt\", [\"other_opt\"]] })\n end\n\n context \"when experiments are defined via YAML\" do\n context \"as strings\" do\n context \"in a basic configuration\" do\n before do\n experiments_yaml = <<-eos\n my_experiment:\n alternatives:\n - Control Opt\n - Alt One\n - Alt Two\n resettable: false\n eos\n @config.experiments = YAML.load(experiments_yaml)\n end\n\n it \"should normalize experiments\" do\n expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [\"Control Opt\", [\"Alt One\", \"Alt Two\"]] } })\n end\n end\n\n context \"in a configuration with metadata\" do\n before 
do\n experiments_yaml = <<-eos\n my_experiment:\n alternatives:\n - name: Control Opt\n percent: 67\n - name: Alt One\n percent: 10\n - name: Alt Two\n percent: 23\n metadata:\n Control Opt:\n text: 'Control Option'\n Alt One:\n text: 'Alternative One'\n Alt Two:\n text: 'Alternative Two'\n resettable: false\n eos\n @config.experiments = YAML.load(experiments_yaml)\n end\n\n it \"should have metadata on the experiment\" do\n meta = @config.normalized_experiments[:my_experiment][:metadata]\n expect(meta).to_not be nil\n expect(meta[\"Control Opt\"][\"text\"]).to eq(\"Control Option\")\n end\n end\n\n context \"in a complex configuration\" do\n before do\n experiments_yaml = <<-eos\n my_experiment:\n alternatives:\n - name: Control Opt\n percent: 67\n - name: Alt One\n percent: 10\n - name: Alt Two\n percent: 23\n resettable: false\n metric: my_metric\n another_experiment:\n alternatives:\n - a\n - b\n eos\n @config.experiments = YAML.load(experiments_yaml)\n end\n\n it \"should normalize experiments\" do\n expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [{ \"Control Opt\"=>0.67 },\n [{ \"Alt One\"=>0.1 }, { \"Alt Two\"=>0.23 }]] }, another_experiment: { alternatives: [\"a\", [\"b\"]] } })\n end\n\n it \"should recognize metrics\" do\n expect(@config.metrics).not_to be_nil\n expect(@config.metrics.keys).to eq([:my_metric])\n end\n end\n end\n\n context \"as symbols\" do\n context \"with valid YAML\" do\n before do\n experiments_yaml = <<-eos\n :my_experiment:\n :alternatives:\n - Control Opt\n - Alt One\n - Alt Two\n :resettable: false\n eos\n @config.experiments = YAML.load(experiments_yaml)\n end\n\n it \"should normalize experiments\" do\n expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [\"Control Opt\", [\"Alt One\", \"Alt Two\"]] } })\n end\n end\n\n context \"with invalid YAML\" do\n let(:yaml) { YAML.load(input) }\n\n context \"with an empty string\" do\n 
let(:input) { \"\" }\n\n it \"should raise an error\" do\n expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n end\n\n context \"with just the YAML header\" do\n let(:input) { \"---\" }\n\n it \"should raise an error\" do\n expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n end\n end\n end\n end\n\n it \"should normalize experiments\" do\n @config.experiments = {\n my_experiment: {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n }\n\n expect(@config.normalized_experiments).to eq({ my_experiment: { alternatives: [{ \"control_opt\"=>0.67 }, [{ \"second_opt\"=>0.1 }, { \"third_opt\"=>0.23 }]] } })\n end\n\n context \"redis configuration\" do\n it \"should default to local redis server\" do\n old_redis_url = ENV[\"REDIS_URL\"]\n ENV.delete(\"REDIS_URL\")\n\n context \"redis configuration\" do\n it \"should default to local redis server\" do\n expect(@config.redis).to eq(\"redis://localhost:6379\")\n end\n\n it \"should allow for redis url to be configured\" do\n it \"should use the ENV variable\" do\n old_redis_url = ENV[\"REDIS_URL\"]\n ENV[\"REDIS_URL\"] = \"env_redis_url\"\n\n context \"provided REDIS_URL environment variable\" do\n it \"should use the ENV variable\" do\n ENV['REDIS_URL'] = \"env_redis_url\"\n expect(Split::Configuration.new.redis).to eq(\"env_redis_url\")\n ENV.delete('REDIS_URL')\n end\n end\n end\n\n it \"should allow the persistence cookie length to be configured\" do\n @config.persistence_cookie_length = 2592000\n expect(@config.persistence_cookie_length).to eq(2592000)\n end\n end\n\n context \"persistence cookie domain\" do\n it \"should default to nil\" do\n expect(@config.persistence_cookie_domain).to eq(nil)\n end\n\n it \"should allow the persistence cookie domain to be configured\" do\n @config.persistence_cookie_domain = \".acme.com\"\n 
expect(@config.persistence_cookie_domain).to eq(\".acme.com\")\n end\n end\nend\n\n Merge pull request #662 from splitrb/gh-actions\n\nMoving to Github Actions for CI\n @@ -214,7 +214,10 @@ describe Split::Configuration do\n \n context \"redis configuration\" do\n it \"should default to local redis server\" do\n- expect(@config.redis).to eq(\"redis://localhost:6379\")\n+ old_redis_url = ENV['REDIS_URL']\n+ ENV.delete('REDIS_URL')\n+ expect(Split::Configuration.new.redis).to eq(\"redis://localhost:6379\")\n+ ENV['REDIS_URL'] = old_redis_url\n end\n \n it \"should allow for redis url to be configured\" do\n@@ -224,9 +227,10 @@ describe Split::Configuration do\n \n context \"provided REDIS_URL environment variable\" do\n it \"should use the ENV variable\" do\n+ old_redis_url = ENV['REDIS_URL']\n ENV['REDIS_URL'] = \"env_redis_url\"\n expect(Split::Configuration.new.redis).to eq(\"env_redis_url\")\n- ENV.delete('REDIS_URL')\n+ ENV['REDIS_URL'] = old_redis_url\n end\n end\n end\n"},"addition_count":{"kind":"number","value":6,"string":"6"},"commit_subject":{"kind":"string","value":"Merge pull request #662 from splitrb/gh-actions"},"deletion_count":{"kind":"number","value":2,"string":"2"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675158,"cells":{"id":{"kind":"string","value":"10070808"},"text":{"kind":"string","value":" configuration_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\n\ndescribe Split::Configuration do\n before(:each) { @config = Split::Configuration.new }\n\n it \"should provide a default value for ignore_ip_addresses\" do\n expect(@config.ignore_ip_addresses).to eq([])\n end\n\n it \"should provide default values for db failover\" do\n expect(@config.db_failover).to be_falsey\n expect(@config.db_failover_on_db_error).to be_a Proc\n end\n\n it \"should not allow multiple experiments by 
default\" do\n expect(@config.allow_multiple_experiments).to be_falsey\n end\n\n it \"should be enabled by default\" do\n expect(@config.enabled).to be_truthy\n end\n\n it \"disabled is the opposite of enabled\" do\n @config.enabled = false\n expect(@config.disabled?).to be_truthy\n end\n\n it \"should not store the overridden test group per default\" do\n expect(@config.store_override).to be_falsey\n end\n\n it \"should provide a default pattern for robots\" do\n %w[Baidu Gigabot Googlebot libwww-perl lwp-trivial msnbot SiteUptime Slurp WordPress ZIBB ZyBorg YandexBot AdsBot-Google Wget curl bitlybot facebookexternalhit spider].each do |robot|\n expect(@config.robot_regex).to match(robot)\n end\n\n expect(@config.robot_regex).to match(\"EventMachine HttpClient\")\n expect(@config.robot_regex).to match(\"libwww-perl/5.836\")\n expect(@config.robot_regex).to match(\"Pingdom.com_bot_version_1.4_(http://www.pingdom.com)\")\n\n expect(@config.robot_regex).to match(\" - \")\n end\n\n it \"should accept real UAs with the robot regexp\" do\n expect(@config.robot_regex).not_to match(\"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0\")\n expect(@config.robot_regex).not_to match(\"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; F-6.0SP2-20041109; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; .NET CLR 1.1.4322; InfoPath.3)\")\n end\n\n it \"should allow adding a bot to the bot list\" do\n @config.bots[\"newbot\"] = \"An amazing test bot\"\n expect(@config.robot_regex).to match(\"newbot\")\n end\n\n it \"should use the session adapter for persistence by default\" do\n expect(@config.persistence).to eq(Split::Persistence::SessionAdapter)\n end\n\n it \"should load a metric\" do\n @config.experiments = { my_experiment: { alternatives: [\"control_opt\", \"other_opt\"], metric: :my_metric } }\n\n expect(@config.metrics).not_to be_nil\n expect(@config.metrics.keys).to eq([:my_metric])\n end\n\n it \"should 
allow loading of experiment using experment_for\" do\n @config.experiments = { my_experiment: { alternatives: [\"control_opt\", \"other_opt\"], metric: :my_metric } }\n expect(@config.experiment_for(:my_experiment)).to eq({ alternatives: [\"control_opt\", [\"other_opt\"]] })\n end\n\n context \"when experiments are defined via YAML\" do\n context \"as strings\" do\n context \"in a basic configuration\" do\n before do\n experiments_yaml = <<-eos\n my_experiment:\n alternatives:\n - Control Opt\n - Alt One\n - Alt Two\n resettable: false\n eos\n @config.experiments = YAML.load(experiments_yaml)\n end\n\n it \"should normalize experiments\" do\n expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [\"Control Opt\", [\"Alt One\", \"Alt Two\"]] } })\n end\n end\n\n context \"in a configuration with metadata\" do\n before do\n experiments_yaml = <<-eos\n my_experiment:\n alternatives:\n - name: Control Opt\n percent: 67\n - name: Alt One\n percent: 10\n - name: Alt Two\n percent: 23\n metadata:\n Control Opt:\n text: 'Control Option'\n Alt One:\n text: 'Alternative One'\n Alt Two:\n text: 'Alternative Two'\n resettable: false\n eos\n @config.experiments = YAML.load(experiments_yaml)\n end\n\n it \"should have metadata on the experiment\" do\n meta = @config.normalized_experiments[:my_experiment][:metadata]\n expect(meta).to_not be nil\n expect(meta[\"Control Opt\"][\"text\"]).to eq(\"Control Option\")\n end\n end\n\n context \"in a complex configuration\" do\n before do\n experiments_yaml = <<-eos\n my_experiment:\n alternatives:\n - name: Control Opt\n percent: 67\n - name: Alt One\n percent: 10\n - name: Alt Two\n percent: 23\n resettable: false\n metric: my_metric\n another_experiment:\n alternatives:\n - a\n - b\n eos\n @config.experiments = YAML.load(experiments_yaml)\n end\n\n it \"should normalize experiments\" do\n expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [{ 
\"Control Opt\"=>0.67 },\n [{ \"Alt One\"=>0.1 }, { \"Alt Two\"=>0.23 }]] }, another_experiment: { alternatives: [\"a\", [\"b\"]] } })\n end\n\n it \"should recognize metrics\" do\n expect(@config.metrics).not_to be_nil\n expect(@config.metrics.keys).to eq([:my_metric])\n end\n end\n end\n\n context \"as symbols\" do\n context \"with valid YAML\" do\n before do\n experiments_yaml = <<-eos\n :my_experiment:\n :alternatives:\n - Control Opt\n - Alt One\n - Alt Two\n :resettable: false\n eos\n @config.experiments = YAML.load(experiments_yaml)\n end\n\n it \"should normalize experiments\" do\n expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [\"Control Opt\", [\"Alt One\", \"Alt Two\"]] } })\n end\n end\n\n context \"with invalid YAML\" do\n let(:yaml) { YAML.load(input) }\n\n context \"with an empty string\" do\n let(:input) { \"\" }\n\n it \"should raise an error\" do\n expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n end\n\n context \"with just the YAML header\" do\n let(:input) { \"---\" }\n\n it \"should raise an error\" do\n expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n end\n end\n end\n end\n\n it \"should normalize experiments\" do\n @config.experiments = {\n my_experiment: {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n }\n\n expect(@config.normalized_experiments).to eq({ my_experiment: { alternatives: [{ \"control_opt\"=>0.67 }, [{ \"second_opt\"=>0.1 }, { \"third_opt\"=>0.23 }]] } })\n end\n\n context \"redis configuration\" do\n it \"should default to local redis server\" do\n old_redis_url = ENV[\"REDIS_URL\"]\n ENV.delete(\"REDIS_URL\")\n\n context \"redis configuration\" do\n it \"should default to local redis server\" do\n expect(@config.redis).to eq(\"redis://localhost:6379\")\n end\n\n it \"should 
allow for redis url to be configured\" do\n it \"should use the ENV variable\" do\n old_redis_url = ENV[\"REDIS_URL\"]\n ENV[\"REDIS_URL\"] = \"env_redis_url\"\n\n context \"provided REDIS_URL environment variable\" do\n it \"should use the ENV variable\" do\n ENV['REDIS_URL'] = \"env_redis_url\"\n expect(Split::Configuration.new.redis).to eq(\"env_redis_url\")\n ENV.delete('REDIS_URL')\n end\n end\n end\n\n it \"should allow the persistence cookie length to be configured\" do\n @config.persistence_cookie_length = 2592000\n expect(@config.persistence_cookie_length).to eq(2592000)\n end\n end\n\n context \"persistence cookie domain\" do\n it \"should default to nil\" do\n expect(@config.persistence_cookie_domain).to eq(nil)\n end\n\n it \"should allow the persistence cookie domain to be configured\" do\n @config.persistence_cookie_domain = \".acme.com\"\n expect(@config.persistence_cookie_domain).to eq(\".acme.com\")\n end\n end\nend\n\n Merge pull request #662 from splitrb/gh-actions\n\nMoving to Github Actions for CI\n @@ -214,7 +214,10 @@ describe Split::Configuration do\n \n context \"redis configuration\" do\n it \"should default to local redis server\" do\n- expect(@config.redis).to eq(\"redis://localhost:6379\")\n+ old_redis_url = ENV['REDIS_URL']\n+ ENV.delete('REDIS_URL')\n+ expect(Split::Configuration.new.redis).to eq(\"redis://localhost:6379\")\n+ ENV['REDIS_URL'] = old_redis_url\n end\n \n it \"should allow for redis url to be configured\" do\n@@ -224,9 +227,10 @@ describe Split::Configuration do\n \n context \"provided REDIS_URL environment variable\" do\n it \"should use the ENV variable\" do\n+ old_redis_url = ENV['REDIS_URL']\n ENV['REDIS_URL'] = \"env_redis_url\"\n expect(Split::Configuration.new.redis).to eq(\"env_redis_url\")\n- ENV.delete('REDIS_URL')\n+ ENV['REDIS_URL'] = old_redis_url\n end\n end\n end\n"},"addition_count":{"kind":"number","value":6,"string":"6"},"commit_subject":{"kind":"string","value":"Merge pull request #662 from 
splitrb/gh-actions"},"deletion_count":{"kind":"number","value":2,"string":"2"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675159,"cells":{"id":{"kind":"string","value":"10070809"},"text":{"kind":"string","value":" helper_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\n\n# TODO change some of these tests to use Rack::Test\n\ndescribe Split::Helper do\n include Split::Helper\n\n let(:experiment) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\")\n }\n\n describe \"ab_test\" do\n it \"should not raise an error when passed strings for alternatives\" do\n expect { ab_test(\"xyz\", \"1\", \"2\", \"3\") }.not_to raise_error\n end\n\n it \"should not raise an error when passed an array for alternatives\" do\n expect { ab_test(\"xyz\", [\"1\", \"2\", \"3\"]) }.not_to raise_error\n end\n\n it \"should raise the appropriate error when passed integers for alternatives\" do\n expect { ab_test(\"xyz\", 1, 2, 3) }.to raise_error(ArgumentError)\n end\n\n it \"should raise the appropriate error when passed symbols for alternatives\" do\n expect { ab_test(\"xyz\", :a, :b, :c) }.to raise_error(ArgumentError)\n end\n\n it \"should not raise error when passed an array for goals\" do\n expect { ab_test({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should not raise error when passed just one goal\" do\n expect { ab_test({ \"link_color\" => \"purchase\" }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"raises an appropriate error when processing combined expirements\" do\n Split.configuration.experiments = {\n combined_exp_1: {\n alternatives: [ { name: \"control\", percent: 50 }, { name: \"test-alt\", percent: 50 } ],\n metric: :my_metric,\n combined_experiments: [:combined_exp_1_sub_1]\n }\n }\n 
Split::ExperimentCatalog.find_or_create(\"combined_exp_1\")\n expect { ab_test(\"combined_exp_1\") }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"should assign a random alternative to a new user when there are an equal number of alternatives assigned\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should increment the participation counter after assignment to a new user\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)\n end\n\n it \"should not increment the counter for an experiment that the user is not participating in\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n # User shouldn't participate in this second experiment\n ab_test(\"button_size\", \"small\", \"big\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an not started experiment\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n 
expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should return the given alternative for an existing user\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always return the winner if one is present\" do\n experiment.winner = \"orange\"\n\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"orange\")\n end\n\n it \"should allow the alternative to be forced by passing it in the params\" do\n # ?ab_test[link_color]=blue\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"red\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 5 }, \"red\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not allow an arbitrary alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"pink\" } }\n alternative = ab_test(\"link_color\", \"blue\")\n expect(alternative).to eq(\"blue\")\n end\n\n it \"should not store the split when a param forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"SPLIT_DISABLE query parameter should also force the alternative (uses control)\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", \"red\", \"blue\")\n expect(alternative).to eq(\"red\")\n alternative = 
ab_test(\"link_color\", { \"red\" => 5 }, \"blue\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not store the split when Split generically disabled\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n context \"when store_override is set\" do\n before { Split.configuration.store_override = true }\n\n it \"should store the forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).to receive(:[]=).with(\"link_color\", \"blue\")\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n context \"when on_trial_choose is set\" do\n before { Split.configuration.on_trial_choose = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n it \"should allow passing a block\" do\n alt = ab_test(\"link_color\", \"blue\", \"red\")\n ret = ab_test(\"link_color\", \"blue\", \"red\") { |alternative| \"shared/#{alternative}\" }\n expect(ret).to eq(\"shared/#{alt}\")\n end\n\n it \"should allow the share of visitors see an alternative to be specified\" do\n ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2)\n experiment = Split::ExperimentCatalog.find('link_color')\n expect(experiment.alternatives.map(&:name)).to eq(['blue', 'red'])\n # TODO: persist alternative weights\n # expect(experiment.alternatives.collect{|a| a.weight}).to eq([0.01, 0.2])\n end\n\n it \"should only let a user participate in one experiment at a time\" do\n it \"should only let a user participate in one experiment at a time\" do\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n big = Split::Alternative.new(\"big\", 
\"button_size\")\n expect(big.participant_count).to eq(0)\n small = Split::Alternative.new(\"small\", \"button_size\")\n expect(small.participant_count).to eq(0)\n end\n\n it \"should let a user participate in many experiment with allow_multiple_experiments option\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n button_size = ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n expect(ab_user[\"button_size\"]).to eq(button_size)\n button_size_alt = Split::Alternative.new(button_size, \"button_size\")\n expect(button_size_alt.participant_count).to eq(1)\n end\n\n context \"with allow_multiple_experiments = 'control'\" do\n it \"should let a user participate in many experiment with one non-'control' alternative\" do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n groups = 100.times.map do |n|\n ab_test(\"test#{n}\".to_sym, { \"control\" => (100 - n) }, { \"test#{n}-alt\" => n })\n end\n\n experiments = ab_user.active_experiments\n expect(experiments.size).to be > 1\n\n count_control = experiments.values.count { |g| g == \"control\" }\n expect(count_control).to eq(experiments.size - 1)\n\n count_alts = groups.count { |g| g != \"control\" }\n expect(count_alts).to eq(1)\n end\n\n context \"when user already has experiment\" do\n let(:mock_user) { Split::User.new(self, { \"test_0\" => \"test-alt\" }) }\n\n before do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n\n Split::ExperimentCatalog.find_or_initialize(\"test_0\", \"control\", \"test-alt\").save\n Split::ExperimentCatalog.find_or_initialize(\"test_1\", \"control\", \"test-alt\").save\n end\n\n it \"should restore previously selected alternative\" do\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n 
expect(ab_test(:test_0, { \"control\" => 1 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"should select the correct alternatives after experiment resets\" do\n experiment = Split::ExperimentCatalog.find(:test_0)\n experiment.reset\n mock_user[experiment.key] = \"test-alt\"\n\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"lets override existing choice\" do\n pending \"this requires user store reset on first call not depending on whelther it is current trial\"\n @params = { \"ab_test\" => { \"test_1\" => \"test-alt\" } }\n\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"control\"\n expect(ab_test(:test_1, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n end\n end\n end\n\n it \"should not over-write a finished key when an experiment is on a later version\" do\n experiment.increment_version\n ab_user = { experiment.key => \"blue\", experiment.finished_key => true }\n finished_session = ab_user.dup\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user).to eq(finished_session)\n end\n end\n\n describe \"metadata\" do\n context \"is defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: { \"one\" => \"Meta1\", \"two\" => \"Meta2\" }\n }\n }\n end\n\n it \"should be passed to helper block\" do\n @params = { \"ab_test\" => { \"my_experiment\" => \"two\" } }\n expect(ab_test(\"my_experiment\")).to eq \"two\"\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq(\"Meta2\")\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\")).to eq \"one\"\n 
expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq(\"Meta1\")\n end\n end\n\n context \"is not defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: nil\n }\n }\n end\n\n it \"should be passed to helper block\" do\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq({})\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq({})\n end\n end\n end\n\n describe \"ab_finished\" do\n context \"for an experiment that the user participates in\" do\n before(:each) do\n @experiment_name = \"link_color\"\n @alternatives = [\"blue\", \"red\"]\n @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n end\n\n it \"should increment the counter for the completed alternative\" do\n ab_finished(@experiment_name)\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should set experiment's finished key if reset is false\" do\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should not increment the counter if reset is false and the experiment has been already finished\" do\n 2.times { ab_finished(@experiment_name, { reset: false }) }\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should not 
increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(a, \"button_size\").completed_count }\n end\n\n it \"should clear out the user's participation from their session\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should not clear out the users session if reset is false\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should reset the users session when experiment is not versioned\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should reset the users session when experiment is versioned\" do\n @experiment.increment_version\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n context \"when on_trial_complete is set\" do\n before { Split.configuration.on_trial_complete = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_finished(@experiment_name)\n end\n\n it \"should not call the method without alternative\" do\n ab_user[@experiment.key] = nil\n expect(self).not_to receive(:some_method)\n ab_finished(@experiment_name)\n end\n end\n end\n\n context \"for an experiment that the user is excluded from\" do\n before do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Alternative.new(alternative, 
\"link_color\").participant_count).to eq(1)\n alternative = ab_test(\"button_size\", \"small\", \"big\")\n expect(Split::Alternative.new(alternative, \"button_size\").participant_count).to eq(0)\n end\n\n it \"should not increment the completed counter\" do\n # So, user should be participating in the link_color experiment and\n # receive the control for button_size. As the user is not participating in\n # the button size experiment, finishing it should not increase the\n # completion count for that alternative.\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(\"small\", \"button_size\").completed_count }\n end\n end\n\n context \"for an experiment that the user does not participate in\" do\n before do\n Split::ExperimentCatalog.find_or_create(:not_started_experiment, \"control\", \"alt\")\n end\n it \"should not raise an exception\" do\n expect { ab_finished(:not_started_experiment) }.not_to raise_exception\n end\n\n it \"should not change the user state when reset is false\" do\n expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])\n end\n\n it \"should not change the user state when reset is true\" do\n expect(self).not_to receive(:reset!)\n ab_finished(:not_started_experiment)\n end\n\n it \"should not increment the completed counter\" do\n ab_finished(:not_started_experiment)\n expect(Split::Alternative.new(\"control\", :not_started_experiment).completed_count).to eq(0)\n expect(Split::Alternative.new(\"alt\", :not_started_experiment).completed_count).to eq(0)\n end\n end\n end\n\n context \"finished with config\" do\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n }\n }\n alternative = ab_test(:my_experiment)\n experiment = Split::ExperimentCatalog.find :my_experiment\n\n ab_finished :my_experiment\n expect(ab_user[experiment.key]).to eq(alternative)\n 
expect(ab_user[experiment.finished_key]).to eq(true)\n end\n end\n\n context \"finished with metric name\" do\n before { Split.configuration.experiments = {} }\n before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }\n\n def should_finish_experiment(experiment_name, should_finish = true)\n alts = Split.configuration.experiments[experiment_name][:alternatives]\n experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)\n alt_name = ab_user[experiment.key] = alts.first\n alt = double(\"alternative\")\n expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)\n expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)\n if should_finish\n expect(alt).to receive(:increment_completion).at_most(1).times\n else\n expect(alt).not_to receive(:increment_completion)\n end\n end\n\n it \"completes the test\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n metric: :my_metric\n }\n should_finish_experiment :my_experiment\n ab_finished :my_metric\n end\n\n it \"completes all relevant tests\" do\n Split.configuration.experiments = {\n exp_1: {\n alternatives: [ \"1-1\", \"1-2\" ],\n metric: :my_metric\n },\n exp_2: {\n alternatives: [ \"2-1\", \"2-2\" ],\n metric: :another_metric\n },\n exp_3: {\n alternatives: [ \"3-1\", \"3-2\" ],\n metric: :my_metric\n },\n }\n should_finish_experiment :exp_1\n should_finish_experiment :exp_2, false\n should_finish_experiment :exp_3\n ab_finished :my_metric\n end\n\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n resettable: false,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n\n it \"passes 
through options\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric, reset: false\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n end\n\n describe \"conversions\" do\n it \"should return a conversion rate for an alternative\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(previous_convertion_rate).to eq(0.0)\n\n ab_finished(\"link_color\")\n\n new_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(new_convertion_rate).to eq(1.0)\n end\n end\n\n describe \"active experiments\" do\n it \"should show an active test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show a finished test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n ab_finished(\"def\", { reset: false })\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show an active test when an experiment is on a later version\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"link_color\"\n end\n\n it \"should show versioned tests properly\" do\n 10.times { experiment.reset }\n\n alternative = ab_test(experiment.name, \"blue\", \"red\")\n ab_finished(experiment.name, reset: false)\n\n expect(experiment.version).to eq(10)\n 
expect(active_experiments.count).to eq 1\n expect(active_experiments).to eq({ \"link_color\" => alternative })\n end\n\n it \"should show multiple tests\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 2\n expect(active_experiments[\"def\"]).to eq alternative\n expect(active_experiments[\"ghi\"]).to eq another_alternative\n end\n\n it \"should not show tests with winners\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n e = Split::ExperimentCatalog.find_or_create(\"def\", \"4\", \"5\", \"6\")\n e.winner = \"4\"\n ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"ghi\"\n expect(active_experiments.first[1]).to eq another_alternative\n end\n end\n\n describe \"when user is a robot\" do\n before(:each) do\n @request = OpenStruct.new(user_agent: \"Googlebot/2.1 (+http://www.google.com/bot.html)\")\n end\n\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not create a experiment\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Experiment.new(\"link_color\")).to be_a_new_record\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n 
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when providing custom ignore logic\" do\n context \"using a proc to configure custom logic\" do\n before(:each) do\n Split.configure do |c|\n c.ignore_filter = proc { |request| true } # ignore everything\n end\n end\n\n it \"ignores the ab_test\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n\n red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n expect((red_count + blue_count)).to be(0)\n end\n end\n end\n\n shared_examples_for \"a disabled test\" do\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = 
ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when ip address is ignored\" do\n context \"individually\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.130\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"for a range\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.129\")\n Split.configure do |c|\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"using both a range and a specific value\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.128\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"when ignored other address\" do\n before do\n @request = OpenStruct.new(ip: \"1.1.1.1\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it \"works as usual\" do\n alternative_name = ab_test(\"link_color\", \"red\", \"blue\")\n expect {\n ab_finished(\"link_color\")\n }.to change(Split::Alternative.new(alternative_name, \"link_color\"), :completed_count).by(1)\n end\n end\n end\n\n describe \"when user is previewing\" do\n before(:each) do\n @request = OpenStruct.new(headers: { \"x-purpose\" => \"preview\" })\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n describe \"versioned experiments\" do\n it \"should use version zero if no version is present\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(experiment.version).to 
eq(0)\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n end\n\n it \"should save the version of the experiment to the session\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n end\n\n it \"should load the experiment even if the version is not 0\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n return_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(return_alternative_name).to eq(alternative_name)\n end\n\n it \"should reset the session of a user on an older version of the experiment\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n new_alternative = Split::Alternative.new(new_alternative_name, \"link_color\")\n expect(new_alternative.participant_count).to eq(1)\n end\n\n it \"should cleanup old versions of experiments from the session\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to 
eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n end\n\n it \"should only count completion of users on the current version\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n Split::Alternative.new(alternative_name, \"link_color\")\n\n experiment.reset\n expect(experiment.version).to eq(1)\n\n ab_finished(\"link_color\")\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.completed_count).to eq(0)\n end\n end\n\n context \"when redis is not available\" do\n before(:each) do\n expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)\n end\n\n context \"and db_failover config option is turned off\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = false\n end\n end\n\n describe \"ab_test\" do\n it \"should raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"finished\" do\n it \"should raise an exception\" do\n expect { ab_finished(\"link_color\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"disable split testing\" do\n before(:each) do\n Split.configure do |config|\n config.enabled = false\n end\n end\n\n it \"should not attempt to connect to redis\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should return control variable\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n end\n end\n\n context \"and db_failover config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = true\n end\n end\n\n describe \"ab_test\" do\n it \"should not raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") 
}.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always use first alternative\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"blue\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/blue\")\n end\n\n context \"and db_failover_allow_parameter_override config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover_allow_parameter_override = true\n end\n end\n\n context \"and given an override parameter\" do\n it \"should use given override instead of the first alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\", \"green\")).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/red\")\n end\n end\n end\n\n context \"and preloaded config given\" do\n before do\n Split.configuration.experiments[:link_color] = {\n alternatives: [ \"blue\", \"red\" ],\n }\n end\n\n it \"uses first alternative\" do\n expect(ab_test(:link_color)).to eq(\"blue\")\n end\n end\n end\n\n describe \"finished\" do\n it \"should not raise an exception\" 
do\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_finished(\"link_color\")\n end\n end\n end\n end\n\n context \"with preloaded config\" do\n before { Split.configuration.experiments = {} }\n\n it \"pulls options from config file\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n ab_test :my_experiment\n expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(Split::Experiment.new(:my_experiment).goals).to eq([ \"goal1\", \"goal2\" ])\n end\n\n it \"can be called multiple times\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n 5.times { ab_test :my_experiment }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\" ])\n expect(experiment.participant_count).to eq(1)\n end\n\n it \"accepts multiple goals\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [ \"goal1\", \"goal2\", \"goal3\" ]\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\", \"goal3\" ])\n end\n\n it \"allow specifying goals to be optional\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ]\n }\n experiment = Split::Experiment.new(:my_experiment)\n 
expect(experiment.goals).to eq([])\n end\n\n it \"accepts multiple alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"second_opt\", \"third_opt\" ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"second_opt\", \"third_opt\" ])\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([[\"control_opt\", 0.67], [\"second_opt\", 0.1], [\"third_opt\", 0.23]])\n end\n\n it \"accepts probability on some alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.34], [\"second_opt\", 0.215], [\"third_opt\", 0.23], [\"fourth_opt\", 0.215]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"allows name param without probability\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\" },\n \"second_opt\",\n { name: \"third_opt\", percent: 64 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.18], [\"second_opt\", 0.18], [\"third_opt\", 
0.64]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"fails gracefully if config is missing experiment\" do\n Split.configuration.experiments = { other_experiment: { foo: \"Bar\" } }\n expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)\n end\n\n it \"fails gracefully if config is missing\" do\n expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"fails gracefully if config is missing alternatives\" do\n Split.configuration.experiments[:my_experiment] = { foo: \"Bar\" }\n expect { ab_test :my_experiment }.to raise_error(NoMethodError)\n end\n end\n\n it \"should handle multiple experiments correctly\" do\n experiment2 = Split::ExperimentCatalog.find_or_create(\"link_color2\", \"blue\", \"red\")\n ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"link_color2\", \"blue\", \"red\")\n ab_finished(\"link_color2\")\n\n experiment2.alternatives.each do |alt|\n expect(alt.unfinished_count).to eq(0)\n end\n end\n\n context \"with goals\" do\n before do\n @experiment = { \"link_color\" => [\"purchase\", \"refund\"] }\n @alternatives = [\"blue\", \"red\"]\n @experiment_name, @goals = normalize_metric(@experiment)\n @goal1 = @goals[0]\n @goal2 = @goals[1]\n end\n\n it \"should normalize experiment\" do\n expect(@experiment_name).to eq(\"link_color\")\n expect(@goals).to eq([\"purchase\", \"refund\"])\n end\n\n describe \"ab_test\" do\n it \"should allow experiment goals interface as a single hash\" do\n ab_test(@experiment, *@alternatives)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n describe \"ab_finished\" do\n before do\n @alternative_name = ab_test(@experiment, *@alternatives)\n end\n\n it \"should increment the counter for the specified-goal completed alternative\" do\n expect { ab_finished({ \"link_color\" => [\"purchase\"] }) }\n .to change { 
Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)\n .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)\n end\n end\n end\nend\n\n Merge pull request #570 from giraffate/persist_alternative_weights\n\nPersist alternative weights\n @@ -183,8 +183,7 @@ describe Split::Helper do\n ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2)\n experiment = Split::ExperimentCatalog.find('link_color')\n expect(experiment.alternatives.map(&:name)).to eq(['blue', 'red'])\n- # TODO: persist alternative weights\n- # expect(experiment.alternatives.collect{|a| a.weight}).to eq([0.01, 0.2])\n+ expect(experiment.alternatives.collect{|a| a.weight}).to match_array([0.01, 0.2])\n end\n \n it \"should only let a user participate in one experiment at a time\" do\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #570 from giraffate/persist_alternative_weights"},"deletion_count":{"kind":"number","value":2,"string":"2"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675160,"cells":{"id":{"kind":"string","value":"10070810"},"text":{"kind":"string","value":" helper_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\n\n# TODO change some of these tests to use Rack::Test\n\ndescribe Split::Helper do\n include Split::Helper\n\n let(:experiment) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\")\n }\n\n describe \"ab_test\" do\n it \"should not raise an error when passed strings for alternatives\" do\n expect { ab_test(\"xyz\", \"1\", \"2\", \"3\") }.not_to raise_error\n end\n\n it \"should not raise an error when passed an array for alternatives\" do\n expect { ab_test(\"xyz\", [\"1\", \"2\", \"3\"]) }.not_to raise_error\n end\n\n it 
\"should raise the appropriate error when passed integers for alternatives\" do\n expect { ab_test(\"xyz\", 1, 2, 3) }.to raise_error(ArgumentError)\n end\n\n it \"should raise the appropriate error when passed symbols for alternatives\" do\n expect { ab_test(\"xyz\", :a, :b, :c) }.to raise_error(ArgumentError)\n end\n\n it \"should not raise error when passed an array for goals\" do\n expect { ab_test({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should not raise error when passed just one goal\" do\n expect { ab_test({ \"link_color\" => \"purchase\" }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"raises an appropriate error when processing combined expirements\" do\n Split.configuration.experiments = {\n combined_exp_1: {\n alternatives: [ { name: \"control\", percent: 50 }, { name: \"test-alt\", percent: 50 } ],\n metric: :my_metric,\n combined_experiments: [:combined_exp_1_sub_1]\n }\n }\n Split::ExperimentCatalog.find_or_create(\"combined_exp_1\")\n expect { ab_test(\"combined_exp_1\") }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"should assign a random alternative to a new user when there are an equal number of alternatives assigned\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should increment the participation counter after assignment to a new user\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)\n end\n\n it \"should not increment the 
counter for an experiment that the user is not participating in\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n # User shouldn't participate in this second experiment\n ab_test(\"button_size\", \"small\", \"big\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an not started experiment\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should return the given alternative for an existing user\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always return the winner if one is present\" do\n experiment.winner = \"orange\"\n\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"orange\")\n end\n\n it \"should allow the alternative to be forced by passing it in the params\" do\n # ?ab_test[link_color]=blue\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"red\")\n\n alternative = ab_test(\"link_color\", { 
\"blue\" => 5 }, \"red\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not allow an arbitrary alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"pink\" } }\n alternative = ab_test(\"link_color\", \"blue\")\n expect(alternative).to eq(\"blue\")\n end\n\n it \"should not store the split when a param forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"SPLIT_DISABLE query parameter should also force the alternative (uses control)\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", \"red\", \"blue\")\n expect(alternative).to eq(\"red\")\n alternative = ab_test(\"link_color\", { \"red\" => 5 }, \"blue\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not store the split when Split generically disabled\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n context \"when store_override is set\" do\n before { Split.configuration.store_override = true }\n\n it \"should store the forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).to receive(:[]=).with(\"link_color\", \"blue\")\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n context \"when on_trial_choose is set\" do\n before { Split.configuration.on_trial_choose = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n it \"should allow passing a block\" do\n alt = ab_test(\"link_color\", \"blue\", \"red\")\n ret = ab_test(\"link_color\", \"blue\", 
\"red\") { |alternative| \"shared/#{alternative}\" }\n expect(ret).to eq(\"shared/#{alt}\")\n end\n\n it \"should allow the share of visitors see an alternative to be specified\" do\n ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2)\n experiment = Split::ExperimentCatalog.find('link_color')\n expect(experiment.alternatives.map(&:name)).to eq(['blue', 'red'])\n # TODO: persist alternative weights\n # expect(experiment.alternatives.collect{|a| a.weight}).to eq([0.01, 0.2])\n end\n\n it \"should only let a user participate in one experiment at a time\" do\n it \"should only let a user participate in one experiment at a time\" do\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n big = Split::Alternative.new(\"big\", \"button_size\")\n expect(big.participant_count).to eq(0)\n small = Split::Alternative.new(\"small\", \"button_size\")\n expect(small.participant_count).to eq(0)\n end\n\n it \"should let a user participate in many experiment with allow_multiple_experiments option\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n button_size = ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n expect(ab_user[\"button_size\"]).to eq(button_size)\n button_size_alt = Split::Alternative.new(button_size, \"button_size\")\n expect(button_size_alt.participant_count).to eq(1)\n end\n\n context \"with allow_multiple_experiments = 'control'\" do\n it \"should let a user participate in many experiment with one non-'control' alternative\" do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n groups = 100.times.map do |n|\n ab_test(\"test#{n}\".to_sym, { 
\"control\" => (100 - n) }, { \"test#{n}-alt\" => n })\n end\n\n experiments = ab_user.active_experiments\n expect(experiments.size).to be > 1\n\n count_control = experiments.values.count { |g| g == \"control\" }\n expect(count_control).to eq(experiments.size - 1)\n\n count_alts = groups.count { |g| g != \"control\" }\n expect(count_alts).to eq(1)\n end\n\n context \"when user already has experiment\" do\n let(:mock_user) { Split::User.new(self, { \"test_0\" => \"test-alt\" }) }\n\n before do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n\n Split::ExperimentCatalog.find_or_initialize(\"test_0\", \"control\", \"test-alt\").save\n Split::ExperimentCatalog.find_or_initialize(\"test_1\", \"control\", \"test-alt\").save\n end\n\n it \"should restore previously selected alternative\" do\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 1 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"should select the correct alternatives after experiment resets\" do\n experiment = Split::ExperimentCatalog.find(:test_0)\n experiment.reset\n mock_user[experiment.key] = \"test-alt\"\n\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"lets override existing choice\" do\n pending \"this requires user store reset on first call not depending on whelther it is current trial\"\n @params = { \"ab_test\" => { \"test_1\" => \"test-alt\" } }\n\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"control\"\n expect(ab_test(:test_1, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n end\n end\n end\n\n it \"should not over-write a finished key when an experiment is on 
a later version\" do\n experiment.increment_version\n ab_user = { experiment.key => \"blue\", experiment.finished_key => true }\n finished_session = ab_user.dup\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user).to eq(finished_session)\n end\n end\n\n describe \"metadata\" do\n context \"is defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: { \"one\" => \"Meta1\", \"two\" => \"Meta2\" }\n }\n }\n end\n\n it \"should be passed to helper block\" do\n @params = { \"ab_test\" => { \"my_experiment\" => \"two\" } }\n expect(ab_test(\"my_experiment\")).to eq \"two\"\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq(\"Meta2\")\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\")).to eq \"one\"\n expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq(\"Meta1\")\n end\n end\n\n context \"is not defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: nil\n }\n }\n end\n\n it \"should be passed to helper block\" do\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq({})\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq({})\n end\n end\n end\n\n describe \"ab_finished\" do\n context \"for an experiment that the user participates in\" do\n before(:each) do\n @experiment_name = \"link_color\"\n @alternatives = [\"blue\", \"red\"]\n @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n @previous_completion_count = 
Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n end\n\n it \"should increment the counter for the completed alternative\" do\n ab_finished(@experiment_name)\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should set experiment's finished key if reset is false\" do\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should not increment the counter if reset is false and the experiment has been already finished\" do\n 2.times { ab_finished(@experiment_name, { reset: false }) }\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(a, \"button_size\").completed_count }\n end\n\n it \"should clear out the user's participation from their session\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should not clear out the users session if reset is false\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should reset the users session when experiment is not versioned\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n 
ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should reset the users session when experiment is versioned\" do\n @experiment.increment_version\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n context \"when on_trial_complete is set\" do\n before { Split.configuration.on_trial_complete = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_finished(@experiment_name)\n end\n\n it \"should not call the method without alternative\" do\n ab_user[@experiment.key] = nil\n expect(self).not_to receive(:some_method)\n ab_finished(@experiment_name)\n end\n end\n end\n\n context \"for an experiment that the user is excluded from\" do\n before do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Alternative.new(alternative, \"link_color\").participant_count).to eq(1)\n alternative = ab_test(\"button_size\", \"small\", \"big\")\n expect(Split::Alternative.new(alternative, \"button_size\").participant_count).to eq(0)\n end\n\n it \"should not increment the completed counter\" do\n # So, user should be participating in the link_color experiment and\n # receive the control for button_size. 
As the user is not participating in\n # the button size experiment, finishing it should not increase the\n # completion count for that alternative.\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(\"small\", \"button_size\").completed_count }\n end\n end\n\n context \"for an experiment that the user does not participate in\" do\n before do\n Split::ExperimentCatalog.find_or_create(:not_started_experiment, \"control\", \"alt\")\n end\n it \"should not raise an exception\" do\n expect { ab_finished(:not_started_experiment) }.not_to raise_exception\n end\n\n it \"should not change the user state when reset is false\" do\n expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])\n end\n\n it \"should not change the user state when reset is true\" do\n expect(self).not_to receive(:reset!)\n ab_finished(:not_started_experiment)\n end\n\n it \"should not increment the completed counter\" do\n ab_finished(:not_started_experiment)\n expect(Split::Alternative.new(\"control\", :not_started_experiment).completed_count).to eq(0)\n expect(Split::Alternative.new(\"alt\", :not_started_experiment).completed_count).to eq(0)\n end\n end\n end\n\n context \"finished with config\" do\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n }\n }\n alternative = ab_test(:my_experiment)\n experiment = Split::ExperimentCatalog.find :my_experiment\n\n ab_finished :my_experiment\n expect(ab_user[experiment.key]).to eq(alternative)\n expect(ab_user[experiment.finished_key]).to eq(true)\n end\n end\n\n context \"finished with metric name\" do\n before { Split.configuration.experiments = {} }\n before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }\n\n def should_finish_experiment(experiment_name, should_finish = true)\n alts = 
Split.configuration.experiments[experiment_name][:alternatives]\n experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)\n alt_name = ab_user[experiment.key] = alts.first\n alt = double(\"alternative\")\n expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)\n expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)\n if should_finish\n expect(alt).to receive(:increment_completion).at_most(1).times\n else\n expect(alt).not_to receive(:increment_completion)\n end\n end\n\n it \"completes the test\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n metric: :my_metric\n }\n should_finish_experiment :my_experiment\n ab_finished :my_metric\n end\n\n it \"completes all relevant tests\" do\n Split.configuration.experiments = {\n exp_1: {\n alternatives: [ \"1-1\", \"1-2\" ],\n metric: :my_metric\n },\n exp_2: {\n alternatives: [ \"2-1\", \"2-2\" ],\n metric: :another_metric\n },\n exp_3: {\n alternatives: [ \"3-1\", \"3-2\" ],\n metric: :my_metric\n },\n }\n should_finish_experiment :exp_1\n should_finish_experiment :exp_2, false\n should_finish_experiment :exp_3\n ab_finished :my_metric\n end\n\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n resettable: false,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n\n it \"passes through options\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric, reset: false\n expect(ab_user[exp.key]).to eq(alternative_name)\n 
expect(ab_user[exp.finished_key]).to be_truthy\n end\n end\n\n describe \"conversions\" do\n it \"should return a conversion rate for an alternative\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(previous_convertion_rate).to eq(0.0)\n\n ab_finished(\"link_color\")\n\n new_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(new_convertion_rate).to eq(1.0)\n end\n end\n\n describe \"active experiments\" do\n it \"should show an active test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show a finished test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n ab_finished(\"def\", { reset: false })\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show an active test when an experiment is on a later version\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"link_color\"\n end\n\n it \"should show versioned tests properly\" do\n 10.times { experiment.reset }\n\n alternative = ab_test(experiment.name, \"blue\", \"red\")\n ab_finished(experiment.name, reset: false)\n\n expect(experiment.version).to eq(10)\n expect(active_experiments.count).to eq 1\n expect(active_experiments).to eq({ \"link_color\" => alternative })\n end\n\n it \"should show multiple tests\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", 
\"8\", \"9\")\n expect(active_experiments.count).to eq 2\n expect(active_experiments[\"def\"]).to eq alternative\n expect(active_experiments[\"ghi\"]).to eq another_alternative\n end\n\n it \"should not show tests with winners\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n e = Split::ExperimentCatalog.find_or_create(\"def\", \"4\", \"5\", \"6\")\n e.winner = \"4\"\n ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"ghi\"\n expect(active_experiments.first[1]).to eq another_alternative\n end\n end\n\n describe \"when user is a robot\" do\n before(:each) do\n @request = OpenStruct.new(user_agent: \"Googlebot/2.1 (+http://www.google.com/bot.html)\")\n end\n\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not create a experiment\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Experiment.new(\"link_color\")).to be_a_new_record\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, 
\"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when providing custom ignore logic\" do\n context \"using a proc to configure custom logic\" do\n before(:each) do\n Split.configure do |c|\n c.ignore_filter = proc { |request| true } # ignore everything\n end\n end\n\n it \"ignores the ab_test\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n\n red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n expect((red_count + blue_count)).to be(0)\n end\n end\n end\n\n shared_examples_for \"a disabled test\" do\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to 
eq(previous_completion_count)\n end\n end\n end\n\n describe \"when ip address is ignored\" do\n context \"individually\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.130\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"for a range\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.129\")\n Split.configure do |c|\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"using both a range and a specific value\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.128\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"when ignored other address\" do\n before do\n @request = OpenStruct.new(ip: \"1.1.1.1\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it \"works as usual\" do\n alternative_name = ab_test(\"link_color\", \"red\", \"blue\")\n expect {\n ab_finished(\"link_color\")\n }.to change(Split::Alternative.new(alternative_name, \"link_color\"), :completed_count).by(1)\n end\n end\n end\n\n describe \"when user is previewing\" do\n before(:each) do\n @request = OpenStruct.new(headers: { \"x-purpose\" => \"preview\" })\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n describe \"versioned experiments\" do\n it \"should use version zero if no version is present\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(experiment.version).to eq(0)\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n end\n\n it \"should save the version of the experiment to the session\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to 
eq(alternative_name)\n end\n\n it \"should load the experiment even if the version is not 0\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n return_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(return_alternative_name).to eq(alternative_name)\n end\n\n it \"should reset the session of a user on an older version of the experiment\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n new_alternative = Split::Alternative.new(new_alternative_name, \"link_color\")\n expect(new_alternative.participant_count).to eq(1)\n end\n\n it \"should cleanup old versions of experiments from the session\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n end\n\n it \"should only count completion of users on the current version\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n 
expect(ab_user[\"link_color\"]).to eq(alternative_name)\n Split::Alternative.new(alternative_name, \"link_color\")\n\n experiment.reset\n expect(experiment.version).to eq(1)\n\n ab_finished(\"link_color\")\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.completed_count).to eq(0)\n end\n end\n\n context \"when redis is not available\" do\n before(:each) do\n expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)\n end\n\n context \"and db_failover config option is turned off\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = false\n end\n end\n\n describe \"ab_test\" do\n it \"should raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"finished\" do\n it \"should raise an exception\" do\n expect { ab_finished(\"link_color\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"disable split testing\" do\n before(:each) do\n Split.configure do |config|\n config.enabled = false\n end\n end\n\n it \"should not attempt to connect to redis\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should return control variable\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n end\n end\n\n context \"and db_failover config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = true\n end\n end\n\n describe \"ab_test\" do\n it \"should not raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n 
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always use first alternative\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"blue\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/blue\")\n end\n\n context \"and db_failover_allow_parameter_override config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover_allow_parameter_override = true\n end\n end\n\n context \"and given an override parameter\" do\n it \"should use given override instead of the first alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\", \"green\")).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/red\")\n end\n end\n end\n\n context \"and preloaded config given\" do\n before do\n Split.configuration.experiments[:link_color] = {\n alternatives: [ \"blue\", \"red\" ],\n }\n end\n\n it \"uses first alternative\" do\n expect(ab_test(:link_color)).to eq(\"blue\")\n end\n end\n end\n\n describe \"finished\" do\n it \"should not raise an exception\" do\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to 
be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_finished(\"link_color\")\n end\n end\n end\n end\n\n context \"with preloaded config\" do\n before { Split.configuration.experiments = {} }\n\n it \"pulls options from config file\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n ab_test :my_experiment\n expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(Split::Experiment.new(:my_experiment).goals).to eq([ \"goal1\", \"goal2\" ])\n end\n\n it \"can be called multiple times\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n 5.times { ab_test :my_experiment }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\" ])\n expect(experiment.participant_count).to eq(1)\n end\n\n it \"accepts multiple goals\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [ \"goal1\", \"goal2\", \"goal3\" ]\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\", \"goal3\" ])\n end\n\n it \"allow specifying goals to be optional\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ]\n }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([])\n end\n\n it \"accepts multiple alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"second_opt\", \"third_opt\" ],\n }\n ab_test :my_experiment\n experiment = 
Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"second_opt\", \"third_opt\" ])\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([[\"control_opt\", 0.67], [\"second_opt\", 0.1], [\"third_opt\", 0.23]])\n end\n\n it \"accepts probability on some alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.34], [\"second_opt\", 0.215], [\"third_opt\", 0.23], [\"fourth_opt\", 0.215]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"allows name param without probability\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\" },\n \"second_opt\",\n { name: \"third_opt\", percent: 64 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.18], [\"second_opt\", 0.18], [\"third_opt\", 0.64]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"fails gracefully if config is missing experiment\" do\n Split.configuration.experiments = { other_experiment: { foo: \"Bar\" } }\n expect { ab_test 
:my_experiment }.to raise_error(Split::ExperimentNotFound)\n end\n\n it \"fails gracefully if config is missing\" do\n expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"fails gracefully if config is missing alternatives\" do\n Split.configuration.experiments[:my_experiment] = { foo: \"Bar\" }\n expect { ab_test :my_experiment }.to raise_error(NoMethodError)\n end\n end\n\n it \"should handle multiple experiments correctly\" do\n experiment2 = Split::ExperimentCatalog.find_or_create(\"link_color2\", \"blue\", \"red\")\n ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"link_color2\", \"blue\", \"red\")\n ab_finished(\"link_color2\")\n\n experiment2.alternatives.each do |alt|\n expect(alt.unfinished_count).to eq(0)\n end\n end\n\n context \"with goals\" do\n before do\n @experiment = { \"link_color\" => [\"purchase\", \"refund\"] }\n @alternatives = [\"blue\", \"red\"]\n @experiment_name, @goals = normalize_metric(@experiment)\n @goal1 = @goals[0]\n @goal2 = @goals[1]\n end\n\n it \"should normalize experiment\" do\n expect(@experiment_name).to eq(\"link_color\")\n expect(@goals).to eq([\"purchase\", \"refund\"])\n end\n\n describe \"ab_test\" do\n it \"should allow experiment goals interface as a single hash\" do\n ab_test(@experiment, *@alternatives)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n describe \"ab_finished\" do\n before do\n @alternative_name = ab_test(@experiment, *@alternatives)\n end\n\n it \"should increment the counter for the specified-goal completed alternative\" do\n expect { ab_finished({ \"link_color\" => [\"purchase\"] }) }\n .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)\n .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)\n end\n end\n end\nend\n\n Merge pull request 
#570 from giraffate/persist_alternative_weights\n\nPersist alternative weights\n @@ -183,8 +183,7 @@ describe Split::Helper do\n ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2)\n experiment = Split::ExperimentCatalog.find('link_color')\n expect(experiment.alternatives.map(&:name)).to eq(['blue', 'red'])\n- # TODO: persist alternative weights\n- # expect(experiment.alternatives.collect{|a| a.weight}).to eq([0.01, 0.2])\n+ expect(experiment.alternatives.collect{|a| a.weight}).to match_array([0.01, 0.2])\n end\n \n it \"should only let a user participate in one experiment at a time\" do\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #570 from giraffate/persist_alternative_weights"},"deletion_count":{"kind":"number","value":2,"string":"2"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675161,"cells":{"id":{"kind":"string","value":"10070811"},"text":{"kind":"string","value":" helper_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\n\n# TODO change some of these tests to use Rack::Test\n\ndescribe Split::Helper do\n include Split::Helper\n\n let(:experiment) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\")\n }\n\n describe \"ab_test\" do\n it \"should not raise an error when passed strings for alternatives\" do\n expect { ab_test(\"xyz\", \"1\", \"2\", \"3\") }.not_to raise_error\n end\n\n it \"should not raise an error when passed an array for alternatives\" do\n expect { ab_test(\"xyz\", [\"1\", \"2\", \"3\"]) }.not_to raise_error\n end\n\n it \"should raise the appropriate error when passed integers for alternatives\" do\n expect { ab_test(\"xyz\", 1, 2, 3) }.to raise_error(ArgumentError)\n end\n\n it \"should raise the appropriate error when passed symbols for alternatives\" do\n expect { 
ab_test(\"xyz\", :a, :b, :c) }.to raise_error(ArgumentError)\n end\n\n it \"should not raise error when passed an array for goals\" do\n expect { ab_test({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should not raise error when passed just one goal\" do\n expect { ab_test({ \"link_color\" => \"purchase\" }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"raises an appropriate error when processing combined expirements\" do\n Split.configuration.experiments = {\n combined_exp_1: {\n alternatives: [ { name: \"control\", percent: 50 }, { name: \"test-alt\", percent: 50 } ],\n metric: :my_metric,\n combined_experiments: [:combined_exp_1_sub_1]\n }\n }\n Split::ExperimentCatalog.find_or_create(\"combined_exp_1\")\n expect { ab_test(\"combined_exp_1\") }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"should assign a random alternative to a new user when there are an equal number of alternatives assigned\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should increment the participation counter after assignment to a new user\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)\n end\n\n it \"should not increment the counter for an experiment that the user is not participating in\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n # User shouldn't participate in this second 
experiment\n ab_test(\"button_size\", \"small\", \"big\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an not started experiment\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should return the given alternative for an existing user\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always return the winner if one is present\" do\n experiment.winner = \"orange\"\n\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"orange\")\n end\n\n it \"should allow the alternative to be forced by passing it in the params\" do\n # ?ab_test[link_color]=blue\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"red\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 5 }, \"red\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not allow an arbitrary alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"pink\" } }\n alternative = ab_test(\"link_color\", \"blue\")\n 
expect(alternative).to eq(\"blue\")\n end\n\n it \"should not store the split when a param forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"SPLIT_DISABLE query parameter should also force the alternative (uses control)\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", \"red\", \"blue\")\n expect(alternative).to eq(\"red\")\n alternative = ab_test(\"link_color\", { \"red\" => 5 }, \"blue\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not store the split when Split generically disabled\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n context \"when store_override is set\" do\n before { Split.configuration.store_override = true }\n\n it \"should store the forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).to receive(:[]=).with(\"link_color\", \"blue\")\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n context \"when on_trial_choose is set\" do\n before { Split.configuration.on_trial_choose = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n it \"should allow passing a block\" do\n alt = ab_test(\"link_color\", \"blue\", \"red\")\n ret = ab_test(\"link_color\", \"blue\", \"red\") { |alternative| \"shared/#{alternative}\" }\n expect(ret).to eq(\"shared/#{alt}\")\n end\n\n it \"should allow the share of visitors see an alternative to be specified\" do\n ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })\n 
expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2)\n experiment = Split::ExperimentCatalog.find('link_color')\n expect(experiment.alternatives.map(&:name)).to eq(['blue', 'red'])\n # TODO: persist alternative weights\n # expect(experiment.alternatives.collect{|a| a.weight}).to eq([0.01, 0.2])\n end\n\n it \"should only let a user participate in one experiment at a time\" do\n it \"should only let a user participate in one experiment at a time\" do\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n big = Split::Alternative.new(\"big\", \"button_size\")\n expect(big.participant_count).to eq(0)\n small = Split::Alternative.new(\"small\", \"button_size\")\n expect(small.participant_count).to eq(0)\n end\n\n it \"should let a user participate in many experiment with allow_multiple_experiments option\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n button_size = ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n expect(ab_user[\"button_size\"]).to eq(button_size)\n button_size_alt = Split::Alternative.new(button_size, \"button_size\")\n expect(button_size_alt.participant_count).to eq(1)\n end\n\n context \"with allow_multiple_experiments = 'control'\" do\n it \"should let a user participate in many experiment with one non-'control' alternative\" do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n groups = 100.times.map do |n|\n ab_test(\"test#{n}\".to_sym, { \"control\" => (100 - n) }, { \"test#{n}-alt\" => n })\n end\n\n experiments = ab_user.active_experiments\n expect(experiments.size).to be > 1\n\n count_control = experiments.values.count { |g| g == \"control\" }\n expect(count_control).to 
eq(experiments.size - 1)\n\n count_alts = groups.count { |g| g != \"control\" }\n expect(count_alts).to eq(1)\n end\n\n context \"when user already has experiment\" do\n let(:mock_user) { Split::User.new(self, { \"test_0\" => \"test-alt\" }) }\n\n before do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n\n Split::ExperimentCatalog.find_or_initialize(\"test_0\", \"control\", \"test-alt\").save\n Split::ExperimentCatalog.find_or_initialize(\"test_1\", \"control\", \"test-alt\").save\n end\n\n it \"should restore previously selected alternative\" do\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 1 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"should select the correct alternatives after experiment resets\" do\n experiment = Split::ExperimentCatalog.find(:test_0)\n experiment.reset\n mock_user[experiment.key] = \"test-alt\"\n\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"lets override existing choice\" do\n pending \"this requires user store reset on first call not depending on whelther it is current trial\"\n @params = { \"ab_test\" => { \"test_1\" => \"test-alt\" } }\n\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"control\"\n expect(ab_test(:test_1, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n end\n end\n end\n\n it \"should not over-write a finished key when an experiment is on a later version\" do\n experiment.increment_version\n ab_user = { experiment.key => \"blue\", experiment.finished_key => true }\n finished_session = ab_user.dup\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user).to 
eq(finished_session)\n end\n end\n\n describe \"metadata\" do\n context \"is defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: { \"one\" => \"Meta1\", \"two\" => \"Meta2\" }\n }\n }\n end\n\n it \"should be passed to helper block\" do\n @params = { \"ab_test\" => { \"my_experiment\" => \"two\" } }\n expect(ab_test(\"my_experiment\")).to eq \"two\"\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq(\"Meta2\")\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\")).to eq \"one\"\n expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq(\"Meta1\")\n end\n end\n\n context \"is not defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: nil\n }\n }\n end\n\n it \"should be passed to helper block\" do\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq({})\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq({})\n end\n end\n end\n\n describe \"ab_finished\" do\n context \"for an experiment that the user participates in\" do\n before(:each) do\n @experiment_name = \"link_color\"\n @alternatives = [\"blue\", \"red\"]\n @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n end\n\n it \"should increment the counter for the completed alternative\" do\n ab_finished(@experiment_name)\n new_completion_count = 
Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should set experiment's finished key if reset is false\" do\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should not increment the counter if reset is false and the experiment has been already finished\" do\n 2.times { ab_finished(@experiment_name, { reset: false }) }\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(a, \"button_size\").completed_count }\n end\n\n it \"should clear out the user's participation from their session\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should not clear out the users session if reset is false\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should reset the users session when experiment is not versioned\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should reset the users session when experiment is versioned\" do\n @experiment.increment_version\n @alternative_name = 
ab_test(@experiment_name, *@alternatives)\n\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n context \"when on_trial_complete is set\" do\n before { Split.configuration.on_trial_complete = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_finished(@experiment_name)\n end\n\n it \"should not call the method without alternative\" do\n ab_user[@experiment.key] = nil\n expect(self).not_to receive(:some_method)\n ab_finished(@experiment_name)\n end\n end\n end\n\n context \"for an experiment that the user is excluded from\" do\n before do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Alternative.new(alternative, \"link_color\").participant_count).to eq(1)\n alternative = ab_test(\"button_size\", \"small\", \"big\")\n expect(Split::Alternative.new(alternative, \"button_size\").participant_count).to eq(0)\n end\n\n it \"should not increment the completed counter\" do\n # So, user should be participating in the link_color experiment and\n # receive the control for button_size. 
As the user is not participating in\n # the button size experiment, finishing it should not increase the\n # completion count for that alternative.\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(\"small\", \"button_size\").completed_count }\n end\n end\n\n context \"for an experiment that the user does not participate in\" do\n before do\n Split::ExperimentCatalog.find_or_create(:not_started_experiment, \"control\", \"alt\")\n end\n it \"should not raise an exception\" do\n expect { ab_finished(:not_started_experiment) }.not_to raise_exception\n end\n\n it \"should not change the user state when reset is false\" do\n expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])\n end\n\n it \"should not change the user state when reset is true\" do\n expect(self).not_to receive(:reset!)\n ab_finished(:not_started_experiment)\n end\n\n it \"should not increment the completed counter\" do\n ab_finished(:not_started_experiment)\n expect(Split::Alternative.new(\"control\", :not_started_experiment).completed_count).to eq(0)\n expect(Split::Alternative.new(\"alt\", :not_started_experiment).completed_count).to eq(0)\n end\n end\n end\n\n context \"finished with config\" do\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n }\n }\n alternative = ab_test(:my_experiment)\n experiment = Split::ExperimentCatalog.find :my_experiment\n\n ab_finished :my_experiment\n expect(ab_user[experiment.key]).to eq(alternative)\n expect(ab_user[experiment.finished_key]).to eq(true)\n end\n end\n\n context \"finished with metric name\" do\n before { Split.configuration.experiments = {} }\n before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }\n\n def should_finish_experiment(experiment_name, should_finish = true)\n alts = 
Split.configuration.experiments[experiment_name][:alternatives]\n experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)\n alt_name = ab_user[experiment.key] = alts.first\n alt = double(\"alternative\")\n expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)\n expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)\n if should_finish\n expect(alt).to receive(:increment_completion).at_most(1).times\n else\n expect(alt).not_to receive(:increment_completion)\n end\n end\n\n it \"completes the test\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n metric: :my_metric\n }\n should_finish_experiment :my_experiment\n ab_finished :my_metric\n end\n\n it \"completes all relevant tests\" do\n Split.configuration.experiments = {\n exp_1: {\n alternatives: [ \"1-1\", \"1-2\" ],\n metric: :my_metric\n },\n exp_2: {\n alternatives: [ \"2-1\", \"2-2\" ],\n metric: :another_metric\n },\n exp_3: {\n alternatives: [ \"3-1\", \"3-2\" ],\n metric: :my_metric\n },\n }\n should_finish_experiment :exp_1\n should_finish_experiment :exp_2, false\n should_finish_experiment :exp_3\n ab_finished :my_metric\n end\n\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n resettable: false,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n\n it \"passes through options\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric, reset: false\n expect(ab_user[exp.key]).to eq(alternative_name)\n 
expect(ab_user[exp.finished_key]).to be_truthy\n end\n end\n\n describe \"conversions\" do\n it \"should return a conversion rate for an alternative\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(previous_convertion_rate).to eq(0.0)\n\n ab_finished(\"link_color\")\n\n new_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(new_convertion_rate).to eq(1.0)\n end\n end\n\n describe \"active experiments\" do\n it \"should show an active test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show a finished test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n ab_finished(\"def\", { reset: false })\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show an active test when an experiment is on a later version\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"link_color\"\n end\n\n it \"should show versioned tests properly\" do\n 10.times { experiment.reset }\n\n alternative = ab_test(experiment.name, \"blue\", \"red\")\n ab_finished(experiment.name, reset: false)\n\n expect(experiment.version).to eq(10)\n expect(active_experiments.count).to eq 1\n expect(active_experiments).to eq({ \"link_color\" => alternative })\n end\n\n it \"should show multiple tests\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", 
\"8\", \"9\")\n expect(active_experiments.count).to eq 2\n expect(active_experiments[\"def\"]).to eq alternative\n expect(active_experiments[\"ghi\"]).to eq another_alternative\n end\n\n it \"should not show tests with winners\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n e = Split::ExperimentCatalog.find_or_create(\"def\", \"4\", \"5\", \"6\")\n e.winner = \"4\"\n ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"ghi\"\n expect(active_experiments.first[1]).to eq another_alternative\n end\n end\n\n describe \"when user is a robot\" do\n before(:each) do\n @request = OpenStruct.new(user_agent: \"Googlebot/2.1 (+http://www.google.com/bot.html)\")\n end\n\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not create a experiment\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Experiment.new(\"link_color\")).to be_a_new_record\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, 
\"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when providing custom ignore logic\" do\n context \"using a proc to configure custom logic\" do\n before(:each) do\n Split.configure do |c|\n c.ignore_filter = proc { |request| true } # ignore everything\n end\n end\n\n it \"ignores the ab_test\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n\n red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n expect((red_count + blue_count)).to be(0)\n end\n end\n end\n\n shared_examples_for \"a disabled test\" do\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to 
eq(previous_completion_count)\n end\n end\n end\n\n describe \"when ip address is ignored\" do\n context \"individually\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.130\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"for a range\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.129\")\n Split.configure do |c|\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"using both a range and a specific value\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.128\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"when ignored other address\" do\n before do\n @request = OpenStruct.new(ip: \"1.1.1.1\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it \"works as usual\" do\n alternative_name = ab_test(\"link_color\", \"red\", \"blue\")\n expect {\n ab_finished(\"link_color\")\n }.to change(Split::Alternative.new(alternative_name, \"link_color\"), :completed_count).by(1)\n end\n end\n end\n\n describe \"when user is previewing\" do\n before(:each) do\n @request = OpenStruct.new(headers: { \"x-purpose\" => \"preview\" })\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n describe \"versioned experiments\" do\n it \"should use version zero if no version is present\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(experiment.version).to eq(0)\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n end\n\n it \"should save the version of the experiment to the session\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to 
eq(alternative_name)\n end\n\n it \"should load the experiment even if the version is not 0\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n return_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(return_alternative_name).to eq(alternative_name)\n end\n\n it \"should reset the session of a user on an older version of the experiment\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n new_alternative = Split::Alternative.new(new_alternative_name, \"link_color\")\n expect(new_alternative.participant_count).to eq(1)\n end\n\n it \"should cleanup old versions of experiments from the session\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n end\n\n it \"should only count completion of users on the current version\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n 
expect(ab_user[\"link_color\"]).to eq(alternative_name)\n Split::Alternative.new(alternative_name, \"link_color\")\n\n experiment.reset\n expect(experiment.version).to eq(1)\n\n ab_finished(\"link_color\")\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.completed_count).to eq(0)\n end\n end\n\n context \"when redis is not available\" do\n before(:each) do\n expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)\n end\n\n context \"and db_failover config option is turned off\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = false\n end\n end\n\n describe \"ab_test\" do\n it \"should raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"finished\" do\n it \"should raise an exception\" do\n expect { ab_finished(\"link_color\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"disable split testing\" do\n before(:each) do\n Split.configure do |config|\n config.enabled = false\n end\n end\n\n it \"should not attempt to connect to redis\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should return control variable\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n end\n end\n\n context \"and db_failover config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = true\n end\n end\n\n describe \"ab_test\" do\n it \"should not raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n 
expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always use first alternative\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"blue\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/blue\")\n end\n\n context \"and db_failover_allow_parameter_override config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover_allow_parameter_override = true\n end\n end\n\n context \"and given an override parameter\" do\n it \"should use given override instead of the first alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\", \"green\")).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/red\")\n end\n end\n end\n\n context \"and preloaded config given\" do\n before do\n Split.configuration.experiments[:link_color] = {\n alternatives: [ \"blue\", \"red\" ],\n }\n end\n\n it \"uses first alternative\" do\n expect(ab_test(:link_color)).to eq(\"blue\")\n end\n end\n end\n\n describe \"finished\" do\n it \"should not raise an exception\" do\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to 
be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_finished(\"link_color\")\n end\n end\n end\n end\n\n context \"with preloaded config\" do\n before { Split.configuration.experiments = {} }\n\n it \"pulls options from config file\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n ab_test :my_experiment\n expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(Split::Experiment.new(:my_experiment).goals).to eq([ \"goal1\", \"goal2\" ])\n end\n\n it \"can be called multiple times\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n 5.times { ab_test :my_experiment }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\" ])\n expect(experiment.participant_count).to eq(1)\n end\n\n it \"accepts multiple goals\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [ \"goal1\", \"goal2\", \"goal3\" ]\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\", \"goal3\" ])\n end\n\n it \"allow specifying goals to be optional\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ]\n }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([])\n end\n\n it \"accepts multiple alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"second_opt\", \"third_opt\" ],\n }\n ab_test :my_experiment\n experiment = 
Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"second_opt\", \"third_opt\" ])\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([[\"control_opt\", 0.67], [\"second_opt\", 0.1], [\"third_opt\", 0.23]])\n end\n\n it \"accepts probability on some alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.34], [\"second_opt\", 0.215], [\"third_opt\", 0.23], [\"fourth_opt\", 0.215]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"allows name param without probability\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\" },\n \"second_opt\",\n { name: \"third_opt\", percent: 64 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.18], [\"second_opt\", 0.18], [\"third_opt\", 0.64]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"fails gracefully if config is missing experiment\" do\n Split.configuration.experiments = { other_experiment: { foo: \"Bar\" } }\n expect { ab_test 
:my_experiment }.to raise_error(Split::ExperimentNotFound)\n end\n\n it \"fails gracefully if config is missing\" do\n expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"fails gracefully if config is missing alternatives\" do\n Split.configuration.experiments[:my_experiment] = { foo: \"Bar\" }\n expect { ab_test :my_experiment }.to raise_error(NoMethodError)\n end\n end\n\n it \"should handle multiple experiments correctly\" do\n experiment2 = Split::ExperimentCatalog.find_or_create(\"link_color2\", \"blue\", \"red\")\n ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"link_color2\", \"blue\", \"red\")\n ab_finished(\"link_color2\")\n\n experiment2.alternatives.each do |alt|\n expect(alt.unfinished_count).to eq(0)\n end\n end\n\n context \"with goals\" do\n before do\n @experiment = { \"link_color\" => [\"purchase\", \"refund\"] }\n @alternatives = [\"blue\", \"red\"]\n @experiment_name, @goals = normalize_metric(@experiment)\n @goal1 = @goals[0]\n @goal2 = @goals[1]\n end\n\n it \"should normalize experiment\" do\n expect(@experiment_name).to eq(\"link_color\")\n expect(@goals).to eq([\"purchase\", \"refund\"])\n end\n\n describe \"ab_test\" do\n it \"should allow experiment goals interface as a single hash\" do\n ab_test(@experiment, *@alternatives)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n describe \"ab_finished\" do\n before do\n @alternative_name = ab_test(@experiment, *@alternatives)\n end\n\n it \"should increment the counter for the specified-goal completed alternative\" do\n expect { ab_finished({ \"link_color\" => [\"purchase\"] }) }\n .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)\n .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)\n end\n end\n end\nend\n\n Merge pull request 
#570 from giraffate/persist_alternative_weights\n\nPersist alternative weights\n @@ -183,8 +183,7 @@ describe Split::Helper do\n ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2)\n experiment = Split::ExperimentCatalog.find('link_color')\n expect(experiment.alternatives.map(&:name)).to eq(['blue', 'red'])\n- # TODO: persist alternative weights\n- # expect(experiment.alternatives.collect{|a| a.weight}).to eq([0.01, 0.2])\n+ expect(experiment.alternatives.collect{|a| a.weight}).to match_array([0.01, 0.2])\n end\n \n it \"should only let a user participate in one experiment at a time\" do\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #570 from giraffate/persist_alternative_weights"},"deletion_count":{"kind":"number","value":2,"string":"2"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675162,"cells":{"id":{"kind":"string","value":"10070812"},"text":{"kind":"string","value":" experiment.rb\n # frozen_string_literal: true\n\nmodule Split\n class Experiment\n attr_accessor :name\n attr_accessor :goals\n attr_accessor :alternative_probabilities\n attr_accessor :metadata\n\n attr_reader :alternatives\n attr_reader :resettable\n\n DEFAULT_OPTIONS = {\n resettable: true\n }\n\n def self.find(name)\n Split.cache(:experiments, name) do\n return unless Split.redis.exists?(name)\n Experiment.new(name).tap { |exp| exp.load_from_redis }\n end\n end\n\n def initialize(name, options = {})\n options = DEFAULT_OPTIONS.merge(options)\n\n @name = name.to_s\n\n extract_alternatives_from_options(options)\n end\n\n def self.finished_key(key)\n \"#{key}:finished\"\n end\n\n def set_alternatives_and_options(options)\n options_with_defaults = DEFAULT_OPTIONS.merge(\n options.reject { |k, v| v.nil? 
}\n )\n\n self.alternatives = options_with_defaults[:alternatives]\n self.goals = options_with_defaults[:goals]\n self.resettable = options_with_defaults[:resettable]\n self.algorithm = options_with_defaults[:algorithm]\n self.metadata = options_with_defaults[:metadata]\n end\n\n def extract_alternatives_from_options(options)\n alts = options[:alternatives] || []\n\n if alts.length == 1\n if alts[0].is_a? Hash\n alts = alts[0].map { |k, v| { k => v } }\n end\n end\n\n if alts.empty?\n exp_config = Split.configuration.experiment_for(name)\n if exp_config\n alts = load_alternatives_from_configuration\n options[:goals] = Split::GoalsCollection.new(@name).load_from_configuration\n options[:metadata] = load_metadata_from_configuration\n options[:resettable] = exp_config[:resettable]\n options[:algorithm] = exp_config[:algorithm]\n end\n end\n\n options[:alternatives] = alts\n\n set_alternatives_and_options(options)\n\n # calculate probability that each alternative is the winner\n @alternative_probabilities = {}\n alts\n end\n\n def save\n validate!\n\n if new_record?\n start unless Split.configuration.start_manually\n persist_experiment_configuration\n elsif experiment_configuration_has_changed?\n reset unless Split.configuration.reset_manually\n persist_experiment_configuration\n @alternatives.reverse.each {|a| Split.redis.lpush(name, a.name)}\n goals_collection.save\n save_metadata\n Split.redis.set(metadata_key, @metadata.to_json) unless @metadata.nil?\n else\n existing_alternatives = load_alternatives_from_redis\n existing_goals = Split::GoalsCollection.new(@name).load_from_redis\n\n def validate!\n if @alternatives.empty? && Split.configuration.experiment_for(@name).nil?\n raise ExperimentNotFound.new(\"Experiment #{@name} not found\")\n end\n @alternatives.each { |a| a.validate! 
}\n goals_collection.validate!\n end\n\n def new_record?\n ExperimentCatalog.find(name).nil?\n end\n\n def ==(obj)\n self.name == obj.name\n end\n\n def [](name)\n alternatives.find { |a| a.name == name }\n end\n\n def algorithm\n @algorithm ||= Split.configuration.algorithm\n end\n\n def algorithm=(algorithm)\n @algorithm = algorithm.is_a?(String) ? algorithm.constantize : algorithm\n end\n\n def resettable=(resettable)\n @resettable = resettable.is_a?(String) ? resettable == \"true\" : resettable\n end\n\n def alternatives=(alts)\n @alternatives = alts.map do |alternative|\n if alternative.kind_of?(Split::Alternative)\n alternative\n else\n Split::Alternative.new(alternative, @name)\n end\n end\n end\n\n def winner\n Split.cache(:experiment_winner, name) do\n experiment_winner = redis.hget(:experiment_winner, name)\n if experiment_winner\n Split::Alternative.new(experiment_winner, name)\n else\n nil\n end\n end\n end\n\n def has_winner?\n return @has_winner if defined? @has_winner\n @has_winner = !winner.nil?\n end\n\n def winner=(winner_name)\n redis.hset(:experiment_winner, name, winner_name.to_s)\n @has_winner = true\n Split.configuration.on_experiment_winner_choose.call(self)\n end\n\n def participant_count\n alternatives.inject(0) { |sum, a| sum + a.participant_count }\n end\n\n def control\n alternatives.first\n end\n\n def reset_winner\n redis.hdel(:experiment_winner, name)\n @has_winner = false\n Split::Cache.clear_key(@name)\n end\n\n def start\n redis.hset(:experiment_start_times, @name, Time.now.to_i)\n end\n\n def start_time\n Split.cache(:experiment_start_times, @name) do\n t = redis.hget(:experiment_start_times, @name)\n if t\n # Check if stored time is an integer\n if t =~ /^[-+]?[0-9]+$/\n Time.at(t.to_i)\n else\n Time.parse(t)\n end\n end\n end\n end\n\n def next_alternative\n winner || random_alternative\n end\n\n def random_alternative\n if alternatives.length > 1\n algorithm.choose_alternative(self)\n else\n alternatives.first\n end\n end\n\n 
def version\n @version ||= (redis.get(\"#{name}:version\").to_i || 0)\n end\n\n def increment_version\n @version = redis.incr(\"#{name}:version\")\n end\n\n def key\n if version.to_i > 0\n \"#{name}:#{version}\"\n else\n name\n end\n end\n\n def goals_key\n \"#{name}:goals\"\n end\n\n def finished_key\n self.class.finished_key(key)\n end\n\n def metadata_key\n \"#{name}:metadata\"\n end\n\n def resettable?\n resettable\n end\n\n def reset\n Split.configuration.on_before_experiment_reset.call(self)\n Split::Cache.clear_key(@name)\n alternatives.each(&:reset)\n reset_winner\n Split.configuration.on_experiment_reset.call(self)\n increment_version\n end\n\n def delete\n Split.configuration.on_before_experiment_delete.call(self)\n if Split.configuration.start_manually\n redis.hdel(:experiment_start_times, @name)\n end\n reset_winner\n redis.srem(:experiments, name)\n remove_experiment_cohorting\n remove_experiment_configuration\n Split.configuration.on_experiment_delete.call(self)\n increment_version\n end\n\n def delete_metadata\n redis.del(metadata_key)\n end\n\n def load_from_redis\n exp_config = redis.hgetall(experiment_config_key)\n\n options = {\n resettable: exp_config[\"resettable\"],\n algorithm: exp_config[\"algorithm\"],\n alternatives: load_alternatives_from_redis,\n goals: Split::GoalsCollection.new(@name).load_from_redis,\n metadata: load_metadata_from_redis\n }\n\n set_alternatives_and_options(options)\n end\n\n def calc_winning_alternatives\n # Cache the winning alternatives so we recalculate them once per the specified interval.\n intervals_since_epoch =\n Time.now.utc.to_i / Split.configuration.winning_alternative_recalculation_interval\n\n if self.calc_time != intervals_since_epoch\n if goals.empty?\n self.estimate_winning_alternative\n else\n goals.each do |goal|\n self.estimate_winning_alternative(goal)\n end\n end\n\n self.calc_time = intervals_since_epoch\n\n self.save\n end\n end\n\n def estimate_winning_alternative(goal = nil)\n # initialize a 
hash of beta distributions based on the alternatives' conversion rates\n beta_params = calc_beta_params(goal)\n\n winning_alternatives = []\n\n Split.configuration.beta_probability_simulations.times do\n # calculate simulated conversion rates from the beta distributions\n simulated_cr_hash = calc_simulated_conversion_rates(beta_params)\n\n winning_alternative = find_simulated_winner(simulated_cr_hash)\n\n # push the winning pair to the winning_alternatives array\n winning_alternatives.push(winning_alternative)\n end\n\n winning_counts = count_simulated_wins(winning_alternatives)\n\n @alternative_probabilities = calc_alternative_probabilities(winning_counts, Split.configuration.beta_probability_simulations)\n\n write_to_alternatives(goal)\n\n self.save\n end\n\n def write_to_alternatives(goal = nil)\n alternatives.each do |alternative|\n alternative.set_p_winner(@alternative_probabilities[alternative], goal)\n end\n end\n\n def calc_alternative_probabilities(winning_counts, number_of_simulations)\n alternative_probabilities = {}\n winning_counts.each do |alternative, wins|\n alternative_probabilities[alternative] = wins / number_of_simulations.to_f\n end\n alternative_probabilities\n end\n\n def count_simulated_wins(winning_alternatives)\n # initialize a hash to keep track of winning alternative in simulations\n winning_counts = {}\n alternatives.each do |alternative|\n winning_counts[alternative] = 0\n end\n # count number of times each alternative won, calculate probabilities, place in hash\n winning_alternatives.each do |alternative|\n winning_counts[alternative] += 1\n end\n winning_counts\n end\n\n def find_simulated_winner(simulated_cr_hash)\n # figure out which alternative had the highest simulated conversion rate\n winning_pair = [\"\", 0.0]\n simulated_cr_hash.each do |alternative, rate|\n if rate > winning_pair[1]\n winning_pair = [alternative, rate]\n end\n end\n winner = winning_pair[0]\n winner\n end\n\n def 
calc_simulated_conversion_rates(beta_params)\n simulated_cr_hash = {}\n\n # create a hash which has the conversion rate pulled from each alternative's beta distribution\n beta_params.each do |alternative, params|\n alpha = params[0]\n beta = params[1]\n simulated_conversion_rate = Split::Algorithms.beta_distribution_rng(alpha, beta)\n simulated_cr_hash[alternative] = simulated_conversion_rate\n end\n\n simulated_cr_hash\n end\n\n def calc_beta_params(goal = nil)\n beta_params = {}\n alternatives.each do |alternative|\n conversions = goal.nil? ? alternative.completed_count : alternative.completed_count(goal)\n alpha = 1 + conversions\n beta = 1 + alternative.participant_count - conversions\n\n params = [alpha, beta]\n\n beta_params[alternative] = params\n end\n beta_params\n end\n\n def calc_time=(time)\n redis.hset(experiment_config_key, :calc_time, time)\n end\n\n def calc_time\n redis.hget(experiment_config_key, :calc_time).to_i\n end\n\n def jstring(goal = nil)\n js_id = if goal.nil?\n name\n else\n name + \"-\" + goal\n end\n js_id.gsub(\"/\", \"--\")\n end\n\n def cohorting_disabled?\n @cohorting_disabled ||= begin\n value = redis.hget(experiment_config_key, :cohorting)\n value.nil? ? 
false : value.downcase == \"true\"\n end\n end\n\n def disable_cohorting\n @cohorting_disabled = true\n redis.hset(experiment_config_key, :cohorting, true.to_s)\n end\n\n def enable_cohorting\n @cohorting_disabled = false\n redis.hset(experiment_config_key, :cohorting, false.to_s)\n end\n\n protected\n def experiment_config_key\n \"experiment_configurations/#{@name}\"\n end\n\n def load_metadata_from_configuration\n Split.configuration.experiment_for(@name)[:metadata]\n end\n\n def load_metadata_from_redis\n meta = redis.get(metadata_key)\n JSON.parse(meta) unless meta.nil?\n end\n\n def load_alternatives_from_configuration\n alts = Split.configuration.experiment_for(@name)[:alternatives]\n raise ArgumentError, \"Experiment configuration is missing :alternatives array\" unless alts\n if alts.is_a?(Hash)\n alts.keys\n else\n alts.flatten\n end\n end\n\n def load_alternatives_from_redis\n alternatives = redis.lrange(@name, 0, -1)\n alternatives.map do |alt|\n alt = begin\n JSON.parse(alt)\n rescue\n alt\n end\n Split::Alternative.new(alt, @name)\n end\n end\n\n private\n def redis\n Split.redis\n end\n\n def redis_interface\n RedisInterface.new\n end\n\n def persist_experiment_configuration\n redis_interface.add_to_set(:experiments, name)\n redis_interface.persist_list(name, @alternatives.map { |alt| { alt.name => alt.weight }.to_json })\n goals_collection.save\n\n if @metadata\n redis.set(metadata_key, @metadata.to_json)\n else\n delete_metadata\n end\n end\n\n def remove_experiment_configuration\n @alternatives.each(&:delete)\n goals_collection.delete\n delete_metadata\n redis.del(@name)\n end\n\n def experiment_configuration_has_changed?\n existing_experiment = Experiment.find(@name)\n\n existing_experiment.alternatives.map(&:to_s) != @alternatives.map(&:to_s) ||\n existing_experiment.goals != @goals ||\n existing_experiment.metadata != @metadata\n end\n\n def goals_collection\n Split::GoalsCollection.new(@name, @goals)\n end\n\n def remove_experiment_cohorting\n 
@cohorting_disabled = false\n redis.hdel(experiment_config_key, :cohorting)\n end\n end\nend\n\n Remove unecessary code from Experiment#save\n\nExperiment#save_metadata is called in the previous line, which\nresulted in calling `Split.redis.set(metadata_key, @metadata.to_json)`\ntwice. I removed the unecessary line.\n\n @@ -86,7 +86,6 @@ module Split\n @alternatives.reverse.each {|a| Split.redis.lpush(name, a.name)}\n goals_collection.save\n save_metadata\n- Split.redis.set(metadata_key, @metadata.to_json) unless @metadata.nil?\n else\n existing_alternatives = load_alternatives_from_redis\n existing_goals = Split::GoalsCollection.new(@name).load_from_redis\n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Remove unecessary code from Experiment#save"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675163,"cells":{"id":{"kind":"string","value":"10070813"},"text":{"kind":"string","value":" experiment.rb\n # frozen_string_literal: true\n\nmodule Split\n class Experiment\n attr_accessor :name\n attr_accessor :goals\n attr_accessor :alternative_probabilities\n attr_accessor :metadata\n\n attr_reader :alternatives\n attr_reader :resettable\n\n DEFAULT_OPTIONS = {\n resettable: true\n }\n\n def self.find(name)\n Split.cache(:experiments, name) do\n return unless Split.redis.exists?(name)\n Experiment.new(name).tap { |exp| exp.load_from_redis }\n end\n end\n\n def initialize(name, options = {})\n options = DEFAULT_OPTIONS.merge(options)\n\n @name = name.to_s\n\n extract_alternatives_from_options(options)\n end\n\n def self.finished_key(key)\n \"#{key}:finished\"\n end\n\n def set_alternatives_and_options(options)\n options_with_defaults = DEFAULT_OPTIONS.merge(\n options.reject { |k, v| v.nil? 
}\n )\n\n self.alternatives = options_with_defaults[:alternatives]\n self.goals = options_with_defaults[:goals]\n self.resettable = options_with_defaults[:resettable]\n self.algorithm = options_with_defaults[:algorithm]\n self.metadata = options_with_defaults[:metadata]\n end\n\n def extract_alternatives_from_options(options)\n alts = options[:alternatives] || []\n\n if alts.length == 1\n if alts[0].is_a? Hash\n alts = alts[0].map { |k, v| { k => v } }\n end\n end\n\n if alts.empty?\n exp_config = Split.configuration.experiment_for(name)\n if exp_config\n alts = load_alternatives_from_configuration\n options[:goals] = Split::GoalsCollection.new(@name).load_from_configuration\n options[:metadata] = load_metadata_from_configuration\n options[:resettable] = exp_config[:resettable]\n options[:algorithm] = exp_config[:algorithm]\n end\n end\n\n options[:alternatives] = alts\n\n set_alternatives_and_options(options)\n\n # calculate probability that each alternative is the winner\n @alternative_probabilities = {}\n alts\n end\n\n def save\n validate!\n\n if new_record?\n start unless Split.configuration.start_manually\n persist_experiment_configuration\n elsif experiment_configuration_has_changed?\n reset unless Split.configuration.reset_manually\n persist_experiment_configuration\n @alternatives.reverse.each {|a| Split.redis.lpush(name, a.name)}\n goals_collection.save\n save_metadata\n Split.redis.set(metadata_key, @metadata.to_json) unless @metadata.nil?\n else\n existing_alternatives = load_alternatives_from_redis\n existing_goals = Split::GoalsCollection.new(@name).load_from_redis\n\n def validate!\n if @alternatives.empty? && Split.configuration.experiment_for(@name).nil?\n raise ExperimentNotFound.new(\"Experiment #{@name} not found\")\n end\n @alternatives.each { |a| a.validate! 
}\n goals_collection.validate!\n end\n\n def new_record?\n ExperimentCatalog.find(name).nil?\n end\n\n def ==(obj)\n self.name == obj.name\n end\n\n def [](name)\n alternatives.find { |a| a.name == name }\n end\n\n def algorithm\n @algorithm ||= Split.configuration.algorithm\n end\n\n def algorithm=(algorithm)\n @algorithm = algorithm.is_a?(String) ? algorithm.constantize : algorithm\n end\n\n def resettable=(resettable)\n @resettable = resettable.is_a?(String) ? resettable == \"true\" : resettable\n end\n\n def alternatives=(alts)\n @alternatives = alts.map do |alternative|\n if alternative.kind_of?(Split::Alternative)\n alternative\n else\n Split::Alternative.new(alternative, @name)\n end\n end\n end\n\n def winner\n Split.cache(:experiment_winner, name) do\n experiment_winner = redis.hget(:experiment_winner, name)\n if experiment_winner\n Split::Alternative.new(experiment_winner, name)\n else\n nil\n end\n end\n end\n\n def has_winner?\n return @has_winner if defined? @has_winner\n @has_winner = !winner.nil?\n end\n\n def winner=(winner_name)\n redis.hset(:experiment_winner, name, winner_name.to_s)\n @has_winner = true\n Split.configuration.on_experiment_winner_choose.call(self)\n end\n\n def participant_count\n alternatives.inject(0) { |sum, a| sum + a.participant_count }\n end\n\n def control\n alternatives.first\n end\n\n def reset_winner\n redis.hdel(:experiment_winner, name)\n @has_winner = false\n Split::Cache.clear_key(@name)\n end\n\n def start\n redis.hset(:experiment_start_times, @name, Time.now.to_i)\n end\n\n def start_time\n Split.cache(:experiment_start_times, @name) do\n t = redis.hget(:experiment_start_times, @name)\n if t\n # Check if stored time is an integer\n if t =~ /^[-+]?[0-9]+$/\n Time.at(t.to_i)\n else\n Time.parse(t)\n end\n end\n end\n end\n\n def next_alternative\n winner || random_alternative\n end\n\n def random_alternative\n if alternatives.length > 1\n algorithm.choose_alternative(self)\n else\n alternatives.first\n end\n end\n\n 
def version\n @version ||= (redis.get(\"#{name}:version\").to_i || 0)\n end\n\n def increment_version\n @version = redis.incr(\"#{name}:version\")\n end\n\n def key\n if version.to_i > 0\n \"#{name}:#{version}\"\n else\n name\n end\n end\n\n def goals_key\n \"#{name}:goals\"\n end\n\n def finished_key\n self.class.finished_key(key)\n end\n\n def metadata_key\n \"#{name}:metadata\"\n end\n\n def resettable?\n resettable\n end\n\n def reset\n Split.configuration.on_before_experiment_reset.call(self)\n Split::Cache.clear_key(@name)\n alternatives.each(&:reset)\n reset_winner\n Split.configuration.on_experiment_reset.call(self)\n increment_version\n end\n\n def delete\n Split.configuration.on_before_experiment_delete.call(self)\n if Split.configuration.start_manually\n redis.hdel(:experiment_start_times, @name)\n end\n reset_winner\n redis.srem(:experiments, name)\n remove_experiment_cohorting\n remove_experiment_configuration\n Split.configuration.on_experiment_delete.call(self)\n increment_version\n end\n\n def delete_metadata\n redis.del(metadata_key)\n end\n\n def load_from_redis\n exp_config = redis.hgetall(experiment_config_key)\n\n options = {\n resettable: exp_config[\"resettable\"],\n algorithm: exp_config[\"algorithm\"],\n alternatives: load_alternatives_from_redis,\n goals: Split::GoalsCollection.new(@name).load_from_redis,\n metadata: load_metadata_from_redis\n }\n\n set_alternatives_and_options(options)\n end\n\n def calc_winning_alternatives\n # Cache the winning alternatives so we recalculate them once per the specified interval.\n intervals_since_epoch =\n Time.now.utc.to_i / Split.configuration.winning_alternative_recalculation_interval\n\n if self.calc_time != intervals_since_epoch\n if goals.empty?\n self.estimate_winning_alternative\n else\n goals.each do |goal|\n self.estimate_winning_alternative(goal)\n end\n end\n\n self.calc_time = intervals_since_epoch\n\n self.save\n end\n end\n\n def estimate_winning_alternative(goal = nil)\n # initialize a 
hash of beta distributions based on the alternatives' conversion rates\n beta_params = calc_beta_params(goal)\n\n winning_alternatives = []\n\n Split.configuration.beta_probability_simulations.times do\n # calculate simulated conversion rates from the beta distributions\n simulated_cr_hash = calc_simulated_conversion_rates(beta_params)\n\n winning_alternative = find_simulated_winner(simulated_cr_hash)\n\n # push the winning pair to the winning_alternatives array\n winning_alternatives.push(winning_alternative)\n end\n\n winning_counts = count_simulated_wins(winning_alternatives)\n\n @alternative_probabilities = calc_alternative_probabilities(winning_counts, Split.configuration.beta_probability_simulations)\n\n write_to_alternatives(goal)\n\n self.save\n end\n\n def write_to_alternatives(goal = nil)\n alternatives.each do |alternative|\n alternative.set_p_winner(@alternative_probabilities[alternative], goal)\n end\n end\n\n def calc_alternative_probabilities(winning_counts, number_of_simulations)\n alternative_probabilities = {}\n winning_counts.each do |alternative, wins|\n alternative_probabilities[alternative] = wins / number_of_simulations.to_f\n end\n alternative_probabilities\n end\n\n def count_simulated_wins(winning_alternatives)\n # initialize a hash to keep track of winning alternative in simulations\n winning_counts = {}\n alternatives.each do |alternative|\n winning_counts[alternative] = 0\n end\n # count number of times each alternative won, calculate probabilities, place in hash\n winning_alternatives.each do |alternative|\n winning_counts[alternative] += 1\n end\n winning_counts\n end\n\n def find_simulated_winner(simulated_cr_hash)\n # figure out which alternative had the highest simulated conversion rate\n winning_pair = [\"\", 0.0]\n simulated_cr_hash.each do |alternative, rate|\n if rate > winning_pair[1]\n winning_pair = [alternative, rate]\n end\n end\n winner = winning_pair[0]\n winner\n end\n\n def 
calc_simulated_conversion_rates(beta_params)\n simulated_cr_hash = {}\n\n # create a hash which has the conversion rate pulled from each alternative's beta distribution\n beta_params.each do |alternative, params|\n alpha = params[0]\n beta = params[1]\n simulated_conversion_rate = Split::Algorithms.beta_distribution_rng(alpha, beta)\n simulated_cr_hash[alternative] = simulated_conversion_rate\n end\n\n simulated_cr_hash\n end\n\n def calc_beta_params(goal = nil)\n beta_params = {}\n alternatives.each do |alternative|\n conversions = goal.nil? ? alternative.completed_count : alternative.completed_count(goal)\n alpha = 1 + conversions\n beta = 1 + alternative.participant_count - conversions\n\n params = [alpha, beta]\n\n beta_params[alternative] = params\n end\n beta_params\n end\n\n def calc_time=(time)\n redis.hset(experiment_config_key, :calc_time, time)\n end\n\n def calc_time\n redis.hget(experiment_config_key, :calc_time).to_i\n end\n\n def jstring(goal = nil)\n js_id = if goal.nil?\n name\n else\n name + \"-\" + goal\n end\n js_id.gsub(\"/\", \"--\")\n end\n\n def cohorting_disabled?\n @cohorting_disabled ||= begin\n value = redis.hget(experiment_config_key, :cohorting)\n value.nil? ? 
false : value.downcase == \"true\"\n end\n end\n\n def disable_cohorting\n @cohorting_disabled = true\n redis.hset(experiment_config_key, :cohorting, true.to_s)\n end\n\n def enable_cohorting\n @cohorting_disabled = false\n redis.hset(experiment_config_key, :cohorting, false.to_s)\n end\n\n protected\n def experiment_config_key\n \"experiment_configurations/#{@name}\"\n end\n\n def load_metadata_from_configuration\n Split.configuration.experiment_for(@name)[:metadata]\n end\n\n def load_metadata_from_redis\n meta = redis.get(metadata_key)\n JSON.parse(meta) unless meta.nil?\n end\n\n def load_alternatives_from_configuration\n alts = Split.configuration.experiment_for(@name)[:alternatives]\n raise ArgumentError, \"Experiment configuration is missing :alternatives array\" unless alts\n if alts.is_a?(Hash)\n alts.keys\n else\n alts.flatten\n end\n end\n\n def load_alternatives_from_redis\n alternatives = redis.lrange(@name, 0, -1)\n alternatives.map do |alt|\n alt = begin\n JSON.parse(alt)\n rescue\n alt\n end\n Split::Alternative.new(alt, @name)\n end\n end\n\n private\n def redis\n Split.redis\n end\n\n def redis_interface\n RedisInterface.new\n end\n\n def persist_experiment_configuration\n redis_interface.add_to_set(:experiments, name)\n redis_interface.persist_list(name, @alternatives.map { |alt| { alt.name => alt.weight }.to_json })\n goals_collection.save\n\n if @metadata\n redis.set(metadata_key, @metadata.to_json)\n else\n delete_metadata\n end\n end\n\n def remove_experiment_configuration\n @alternatives.each(&:delete)\n goals_collection.delete\n delete_metadata\n redis.del(@name)\n end\n\n def experiment_configuration_has_changed?\n existing_experiment = Experiment.find(@name)\n\n existing_experiment.alternatives.map(&:to_s) != @alternatives.map(&:to_s) ||\n existing_experiment.goals != @goals ||\n existing_experiment.metadata != @metadata\n end\n\n def goals_collection\n Split::GoalsCollection.new(@name, @goals)\n end\n\n def remove_experiment_cohorting\n 
@cohorting_disabled = false\n redis.hdel(experiment_config_key, :cohorting)\n end\n end\nend\n\n Remove unecessary code from Experiment#save\n\nExperiment#save_metadata is called in the previous line, which\nresulted in calling `Split.redis.set(metadata_key, @metadata.to_json)`\ntwice. I removed the unecessary line.\n\n @@ -86,7 +86,6 @@ module Split\n @alternatives.reverse.each {|a| Split.redis.lpush(name, a.name)}\n goals_collection.save\n save_metadata\n- Split.redis.set(metadata_key, @metadata.to_json) unless @metadata.nil?\n else\n existing_alternatives = load_alternatives_from_redis\n existing_goals = Split::GoalsCollection.new(@name).load_from_redis\n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Remove unecessary code from Experiment#save"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675164,"cells":{"id":{"kind":"string","value":"10070814"},"text":{"kind":"string","value":" experiment.rb\n # frozen_string_literal: true\n\nmodule Split\n class Experiment\n attr_accessor :name\n attr_accessor :goals\n attr_accessor :alternative_probabilities\n attr_accessor :metadata\n\n attr_reader :alternatives\n attr_reader :resettable\n\n DEFAULT_OPTIONS = {\n resettable: true\n }\n\n def self.find(name)\n Split.cache(:experiments, name) do\n return unless Split.redis.exists?(name)\n Experiment.new(name).tap { |exp| exp.load_from_redis }\n end\n end\n\n def initialize(name, options = {})\n options = DEFAULT_OPTIONS.merge(options)\n\n @name = name.to_s\n\n extract_alternatives_from_options(options)\n end\n\n def self.finished_key(key)\n \"#{key}:finished\"\n end\n\n def set_alternatives_and_options(options)\n options_with_defaults = DEFAULT_OPTIONS.merge(\n options.reject { |k, v| v.nil? 
}\n )\n\n self.alternatives = options_with_defaults[:alternatives]\n self.goals = options_with_defaults[:goals]\n self.resettable = options_with_defaults[:resettable]\n self.algorithm = options_with_defaults[:algorithm]\n self.metadata = options_with_defaults[:metadata]\n end\n\n def extract_alternatives_from_options(options)\n alts = options[:alternatives] || []\n\n if alts.length == 1\n if alts[0].is_a? Hash\n alts = alts[0].map { |k, v| { k => v } }\n end\n end\n\n if alts.empty?\n exp_config = Split.configuration.experiment_for(name)\n if exp_config\n alts = load_alternatives_from_configuration\n options[:goals] = Split::GoalsCollection.new(@name).load_from_configuration\n options[:metadata] = load_metadata_from_configuration\n options[:resettable] = exp_config[:resettable]\n options[:algorithm] = exp_config[:algorithm]\n end\n end\n\n options[:alternatives] = alts\n\n set_alternatives_and_options(options)\n\n # calculate probability that each alternative is the winner\n @alternative_probabilities = {}\n alts\n end\n\n def save\n validate!\n\n if new_record?\n start unless Split.configuration.start_manually\n persist_experiment_configuration\n elsif experiment_configuration_has_changed?\n reset unless Split.configuration.reset_manually\n persist_experiment_configuration\n @alternatives.reverse.each {|a| Split.redis.lpush(name, a.name)}\n goals_collection.save\n save_metadata\n Split.redis.set(metadata_key, @metadata.to_json) unless @metadata.nil?\n else\n existing_alternatives = load_alternatives_from_redis\n existing_goals = Split::GoalsCollection.new(@name).load_from_redis\n\n def validate!\n if @alternatives.empty? && Split.configuration.experiment_for(@name).nil?\n raise ExperimentNotFound.new(\"Experiment #{@name} not found\")\n end\n @alternatives.each { |a| a.validate! 
}\n goals_collection.validate!\n end\n\n def new_record?\n ExperimentCatalog.find(name).nil?\n end\n\n def ==(obj)\n self.name == obj.name\n end\n\n def [](name)\n alternatives.find { |a| a.name == name }\n end\n\n def algorithm\n @algorithm ||= Split.configuration.algorithm\n end\n\n def algorithm=(algorithm)\n @algorithm = algorithm.is_a?(String) ? algorithm.constantize : algorithm\n end\n\n def resettable=(resettable)\n @resettable = resettable.is_a?(String) ? resettable == \"true\" : resettable\n end\n\n def alternatives=(alts)\n @alternatives = alts.map do |alternative|\n if alternative.kind_of?(Split::Alternative)\n alternative\n else\n Split::Alternative.new(alternative, @name)\n end\n end\n end\n\n def winner\n Split.cache(:experiment_winner, name) do\n experiment_winner = redis.hget(:experiment_winner, name)\n if experiment_winner\n Split::Alternative.new(experiment_winner, name)\n else\n nil\n end\n end\n end\n\n def has_winner?\n return @has_winner if defined? @has_winner\n @has_winner = !winner.nil?\n end\n\n def winner=(winner_name)\n redis.hset(:experiment_winner, name, winner_name.to_s)\n @has_winner = true\n Split.configuration.on_experiment_winner_choose.call(self)\n end\n\n def participant_count\n alternatives.inject(0) { |sum, a| sum + a.participant_count }\n end\n\n def control\n alternatives.first\n end\n\n def reset_winner\n redis.hdel(:experiment_winner, name)\n @has_winner = false\n Split::Cache.clear_key(@name)\n end\n\n def start\n redis.hset(:experiment_start_times, @name, Time.now.to_i)\n end\n\n def start_time\n Split.cache(:experiment_start_times, @name) do\n t = redis.hget(:experiment_start_times, @name)\n if t\n # Check if stored time is an integer\n if t =~ /^[-+]?[0-9]+$/\n Time.at(t.to_i)\n else\n Time.parse(t)\n end\n end\n end\n end\n\n def next_alternative\n winner || random_alternative\n end\n\n def random_alternative\n if alternatives.length > 1\n algorithm.choose_alternative(self)\n else\n alternatives.first\n end\n end\n\n 
def version\n @version ||= (redis.get(\"#{name}:version\").to_i || 0)\n end\n\n def increment_version\n @version = redis.incr(\"#{name}:version\")\n end\n\n def key\n if version.to_i > 0\n \"#{name}:#{version}\"\n else\n name\n end\n end\n\n def goals_key\n \"#{name}:goals\"\n end\n\n def finished_key\n self.class.finished_key(key)\n end\n\n def metadata_key\n \"#{name}:metadata\"\n end\n\n def resettable?\n resettable\n end\n\n def reset\n Split.configuration.on_before_experiment_reset.call(self)\n Split::Cache.clear_key(@name)\n alternatives.each(&:reset)\n reset_winner\n Split.configuration.on_experiment_reset.call(self)\n increment_version\n end\n\n def delete\n Split.configuration.on_before_experiment_delete.call(self)\n if Split.configuration.start_manually\n redis.hdel(:experiment_start_times, @name)\n end\n reset_winner\n redis.srem(:experiments, name)\n remove_experiment_cohorting\n remove_experiment_configuration\n Split.configuration.on_experiment_delete.call(self)\n increment_version\n end\n\n def delete_metadata\n redis.del(metadata_key)\n end\n\n def load_from_redis\n exp_config = redis.hgetall(experiment_config_key)\n\n options = {\n resettable: exp_config[\"resettable\"],\n algorithm: exp_config[\"algorithm\"],\n alternatives: load_alternatives_from_redis,\n goals: Split::GoalsCollection.new(@name).load_from_redis,\n metadata: load_metadata_from_redis\n }\n\n set_alternatives_and_options(options)\n end\n\n def calc_winning_alternatives\n # Cache the winning alternatives so we recalculate them once per the specified interval.\n intervals_since_epoch =\n Time.now.utc.to_i / Split.configuration.winning_alternative_recalculation_interval\n\n if self.calc_time != intervals_since_epoch\n if goals.empty?\n self.estimate_winning_alternative\n else\n goals.each do |goal|\n self.estimate_winning_alternative(goal)\n end\n end\n\n self.calc_time = intervals_since_epoch\n\n self.save\n end\n end\n\n def estimate_winning_alternative(goal = nil)\n # initialize a 
hash of beta distributions based on the alternatives' conversion rates\n beta_params = calc_beta_params(goal)\n\n winning_alternatives = []\n\n Split.configuration.beta_probability_simulations.times do\n # calculate simulated conversion rates from the beta distributions\n simulated_cr_hash = calc_simulated_conversion_rates(beta_params)\n\n winning_alternative = find_simulated_winner(simulated_cr_hash)\n\n # push the winning pair to the winning_alternatives array\n winning_alternatives.push(winning_alternative)\n end\n\n winning_counts = count_simulated_wins(winning_alternatives)\n\n @alternative_probabilities = calc_alternative_probabilities(winning_counts, Split.configuration.beta_probability_simulations)\n\n write_to_alternatives(goal)\n\n self.save\n end\n\n def write_to_alternatives(goal = nil)\n alternatives.each do |alternative|\n alternative.set_p_winner(@alternative_probabilities[alternative], goal)\n end\n end\n\n def calc_alternative_probabilities(winning_counts, number_of_simulations)\n alternative_probabilities = {}\n winning_counts.each do |alternative, wins|\n alternative_probabilities[alternative] = wins / number_of_simulations.to_f\n end\n alternative_probabilities\n end\n\n def count_simulated_wins(winning_alternatives)\n # initialize a hash to keep track of winning alternative in simulations\n winning_counts = {}\n alternatives.each do |alternative|\n winning_counts[alternative] = 0\n end\n # count number of times each alternative won, calculate probabilities, place in hash\n winning_alternatives.each do |alternative|\n winning_counts[alternative] += 1\n end\n winning_counts\n end\n\n def find_simulated_winner(simulated_cr_hash)\n # figure out which alternative had the highest simulated conversion rate\n winning_pair = [\"\", 0.0]\n simulated_cr_hash.each do |alternative, rate|\n if rate > winning_pair[1]\n winning_pair = [alternative, rate]\n end\n end\n winner = winning_pair[0]\n winner\n end\n\n def 
calc_simulated_conversion_rates(beta_params)\n simulated_cr_hash = {}\n\n # create a hash which has the conversion rate pulled from each alternative's beta distribution\n beta_params.each do |alternative, params|\n alpha = params[0]\n beta = params[1]\n simulated_conversion_rate = Split::Algorithms.beta_distribution_rng(alpha, beta)\n simulated_cr_hash[alternative] = simulated_conversion_rate\n end\n\n simulated_cr_hash\n end\n\n def calc_beta_params(goal = nil)\n beta_params = {}\n alternatives.each do |alternative|\n conversions = goal.nil? ? alternative.completed_count : alternative.completed_count(goal)\n alpha = 1 + conversions\n beta = 1 + alternative.participant_count - conversions\n\n params = [alpha, beta]\n\n beta_params[alternative] = params\n end\n beta_params\n end\n\n def calc_time=(time)\n redis.hset(experiment_config_key, :calc_time, time)\n end\n\n def calc_time\n redis.hget(experiment_config_key, :calc_time).to_i\n end\n\n def jstring(goal = nil)\n js_id = if goal.nil?\n name\n else\n name + \"-\" + goal\n end\n js_id.gsub(\"/\", \"--\")\n end\n\n def cohorting_disabled?\n @cohorting_disabled ||= begin\n value = redis.hget(experiment_config_key, :cohorting)\n value.nil? ? 
false : value.downcase == \"true\"\n end\n end\n\n def disable_cohorting\n @cohorting_disabled = true\n redis.hset(experiment_config_key, :cohorting, true.to_s)\n end\n\n def enable_cohorting\n @cohorting_disabled = false\n redis.hset(experiment_config_key, :cohorting, false.to_s)\n end\n\n protected\n def experiment_config_key\n \"experiment_configurations/#{@name}\"\n end\n\n def load_metadata_from_configuration\n Split.configuration.experiment_for(@name)[:metadata]\n end\n\n def load_metadata_from_redis\n meta = redis.get(metadata_key)\n JSON.parse(meta) unless meta.nil?\n end\n\n def load_alternatives_from_configuration\n alts = Split.configuration.experiment_for(@name)[:alternatives]\n raise ArgumentError, \"Experiment configuration is missing :alternatives array\" unless alts\n if alts.is_a?(Hash)\n alts.keys\n else\n alts.flatten\n end\n end\n\n def load_alternatives_from_redis\n alternatives = redis.lrange(@name, 0, -1)\n alternatives.map do |alt|\n alt = begin\n JSON.parse(alt)\n rescue\n alt\n end\n Split::Alternative.new(alt, @name)\n end\n end\n\n private\n def redis\n Split.redis\n end\n\n def redis_interface\n RedisInterface.new\n end\n\n def persist_experiment_configuration\n redis_interface.add_to_set(:experiments, name)\n redis_interface.persist_list(name, @alternatives.map { |alt| { alt.name => alt.weight }.to_json })\n goals_collection.save\n\n if @metadata\n redis.set(metadata_key, @metadata.to_json)\n else\n delete_metadata\n end\n end\n\n def remove_experiment_configuration\n @alternatives.each(&:delete)\n goals_collection.delete\n delete_metadata\n redis.del(@name)\n end\n\n def experiment_configuration_has_changed?\n existing_experiment = Experiment.find(@name)\n\n existing_experiment.alternatives.map(&:to_s) != @alternatives.map(&:to_s) ||\n existing_experiment.goals != @goals ||\n existing_experiment.metadata != @metadata\n end\n\n def goals_collection\n Split::GoalsCollection.new(@name, @goals)\n end\n\n def remove_experiment_cohorting\n 
@cohorting_disabled = false\n redis.hdel(experiment_config_key, :cohorting)\n end\n end\nend\n\n Remove unecessary code from Experiment#save\n\nExperiment#save_metadata is called in the previous line, which\nresulted in calling `Split.redis.set(metadata_key, @metadata.to_json)`\ntwice. I removed the unecessary line.\n\n @@ -86,7 +86,6 @@ module Split\n @alternatives.reverse.each {|a| Split.redis.lpush(name, a.name)}\n goals_collection.save\n save_metadata\n- Split.redis.set(metadata_key, @metadata.to_json) unless @metadata.nil?\n else\n existing_alternatives = load_alternatives_from_redis\n existing_goals = Split::GoalsCollection.new(@name).load_from_redis\n"},"addition_count":{"kind":"number","value":0,"string":"0"},"commit_subject":{"kind":"string","value":"Remove unecessary code from Experiment#save"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675165,"cells":{"id":{"kind":"string","value":"10070815"},"text":{"kind":"string","value":" expand.ts\n import { strictEqual as equal } from 'assert';\nimport expand, { resolveConfig } from '../src';\n\ndescribe('Expand Abbreviation', () => {\n describe('Markup', () => {\n it('basic', () => {\n equal(expand('ul>.item$*2'), '
    \\n\\t
  • \\n\\t
  • \\n
');\n\n // insert text into abbreviation\n equal(expand('ul>.item$*', { text: ['foo', 'bar'] }), '
    \\n\\t
  • foo
  • \\n\\t
  • bar
  • \\n
');\n\n // insert TextMate-style fields/tabstops in output\n equal(expand('ul>.item$*2', {\n options: {\n 'output.field': (index, placeholder) => `\\${${index}${placeholder ? ':' + placeholder : ''}}`\n }\n }), '
    \\n\\t
  • ${1}
  • \\n\\t
  • ${2}
  • \\n
');\n });\n\n it('attributes', () => {\n const snippets = {\n test: 'test[!foo bar. baz={}]'\n };\n const opt = { snippets };\n const reverse = {\n options: { 'output.reverseAttributes': true },\n snippets\n };\n\n equal(expand('a.test'), '');\n equal(expand('a.test', reverse), '');\n\n equal(expand('test', opt), '');\n equal(expand('test[foo]', opt), '');\n equal(expand('test[baz=a foo=1]', opt), '');\n\n equal(expand('map'), '');\n equal(expand('map[]'), '');\n equal(expand('map[name=\"valid\"]'), '');\n equal(expand('map[href=\"invalid\"]'), '');\n\n // Apply attributes in reverse order\n equal(expand('test', reverse), '');\n equal(expand('test[foo]', reverse), '');\n equal(expand('test[baz=a foo=1]', reverse), '');\n });\n\n it('numbering', () => {\n equal(expand('ul>li.item$@-*5'), '
    \\n\\t
  • \\n\\t
  • \\n\\t
  • \\n\\t
  • \\n\\t
  • \\n
');\n });\n\n it('syntax', () => {\n equal(expand('ul>.item$*2', { syntax: 'html' }), '
    \\n\\t
  • \\n\\t
  • \\n
');\n equal(expand('ul>.item$*2', { syntax: 'slim' }), 'ul\\n\\tli.item1 \\n\\tli.item2 ');\n equal(expand('xsl:variable[name=a select=b]>div', { syntax: 'xsl' }), '\\n\\t
\\n
');\n });\n\n it('custom profile', () => {\n equal(expand('img'), '\"\"');\n equal(expand('img', { options: { 'output.selfClosingStyle': 'xhtml' } }), '\"\"');\n });\n\n it('custom variables', () => {\n const variables = { charset: 'ru-RU' };\n\n equal(expand('[charset=${charset}]{${charset}}'), '
UTF-8
');\n equal(expand('[charset=${charset}]{${charset}}', { variables }), '
ru-RU
');\n });\n\n it('custom snippets', () => {\n const snippets = {\n link: 'link[foo=bar href]/',\n foo: '.foo[bar=baz]',\n repeat: 'div>ul>li{Hello World}*3'\n };\n\n equal(expand('foo', { snippets }), '
');\n\n // `link:css` depends on `link` snippet so changing it will result in\n // altered `link:css` result\n equal(expand('link:css'), '');\n equal(expand('link:css', { snippets }), '');\n\n // https://github.com/emmetio/emmet/issues/468\n equal(expand('repeat', { snippets }), '
\\n\\t
    \\n\\t\\t
  • Hello World
  • \\n\\t\\t
  • Hello World
  • \\n\\t\\t
  • Hello World
  • \\n\\t
\\n
');\n });\n\n it('formatter options', () => {\n equal(expand('ul>.item$*2'), '
    \\n\\t
  • \\n\\t
  • \\n
');\n equal(expand('ul>.item$*2', { options: { 'comment.enabled': true } }),\n '
    \\n\\t
  • \\n\\t\\n\\t
  • \\n\\t\\n
');\n\n equal(expand('div>p'), '
\\n\\t

\\n
');\n equal(expand('div>p', { options: { 'output.formatLeafNode': true } }), '
\\n\\t

\\n\\t\\t\\n\\t

\\n
');\n });\n\n it('JSX', () => {\n const config = { syntax: 'jsx' };\n equal(expand('div#foo.bar', config), '
');\n equal(expand('label[for=a]', config), '');\n equal(expand('Foo.Bar', config), '');\n equal(expand('div.{theme.style}', config), '
');\n });\n\n it('override attributes', () => {\n const config = { syntax: 'jsx' };\n equal(expand('.bar', config), '
');\n equal(expand('..bar', config), '
');\n equal(expand('..foo-bar', config), '
');\n\n equal(expand('.foo', { syntax: 'vue' }), '
');\n equal(expand('..foo', { syntax: 'vue' }), '
');\n });\n\n it('wrap with abbreviation', () => {\n equal(expand('div>ul', { text: ['
line1
\\n
line2
'] }),\n '
\\n\\t
    \\n\\t\\t
    line1
    \\n\\t\\t
    line2
    \\n\\t
\\n
');\n equal(expand('p', { text: 'foo\\nbar' }), '

\\n\\tfoo\\n\\tbar\\n

');\n equal(expand('p', { text: '
foo
' }), '

\\n\\t

foo
\\n

');\n equal(expand('p', { text: 'foo' }), '

foo

');\n equal(expand('p', { text: 'foofoo' }), '

foofoo

');\n equal(expand('p', { text: 'foo
foo
' }), '

foo

foo

');\n });\n\n it('wrap with abbreviation href', () => {\n equal(expand('a', { text: ['www.google.it'] }), 'www.google.it');\n equal(expand('a', { text: ['then www.google.it'] }), 'then www.google.it');\n equal(expand('a', { text: ['www.google.it'], options: { 'markup.href': false } }), 'www.google.it');\n\n equal(expand('map[name=\"https://example.com\"]', { text: ['some text'] }),\n 'some text');\n equal(expand('map[href=\"https://example.com\"]', { text: ['some text'] }),\n 'some text');\n equal(expand('map[name=\"https://example.com\"]>b', { text: ['some text'] }),\n 'some text');\n\n equal(expand('a[href=\"https://example.com\"]>b', { text: ['some text false'], options: { 'markup.href': false } }),\n 'some text false');\n equal(expand('a[href=\"https://example.com\"]>b', { text: ['some text true'], options: { 'markup.href': true } }),\n 'some text true');\n equal(expand('a[href=\"https://example.com\"]>div', { text: ['

some text false

'], options: { 'markup.href': false } }),\n '\\n\\t
\\n\\t\\t

some text false

\\n\\t
\\n
');\n equal(expand('a[href=\"https://example.com\"]>div', { text: ['

some text true

'], options: { 'markup.href': true } }),\n '\\n\\t
\\n\\t\\t

some text true

\\n\\t
\\n
');\n });\n\n // it.only('debug', () => {\n // equal(expand('link:css'), '');\n // });\n });\n\n describe('Pug templates', () => {\n const config = resolveConfig({ syntax: 'pug' });\n it('basic', () => {\n equal(expand('!', config), 'doctype html\\nhtml(lang=\"en\")\\n\\thead\\n\\t\\tmeta(charset=\"UTF-8\")\\n\\t\\tmeta(http-equiv=\"X-UA-Compatible\", content=\"IE=edge\")\\n\\t\\tmeta(name=\"viewport\", content=\"width=device-width, initial-scale=1.0\")\\n\\t\\ttitle Document\\n\\tbody ');\n });\n });\n});\n\n Removed caching in markup abbreviation parser\n\nNaive implementation causes side-effects like incorrect repeater and inserted child content\n\n @@ -4,6 +4,7 @@ import expand, { resolveConfig } from '../src';\n describe('Expand Abbreviation', () => {\n describe('Markup', () => {\n it('basic', () => {\n+ equal(expand('input[value=\"text$\"]*2'), '');\n equal(expand('ul>.item$*2'), '
    \\n\\t
  • \\n\\t
  • \\n
');\n \n // insert text into abbreviation\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Removed caching in markup abbreviation parser"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".ts"},"lang":{"kind":"string","value":"ts"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"emmetio/emmet"}}},{"rowIdx":10675166,"cells":{"id":{"kind":"string","value":"10070816"},"text":{"kind":"string","value":" expand.ts\n import { strictEqual as equal } from 'assert';\nimport expand, { resolveConfig } from '../src';\n\ndescribe('Expand Abbreviation', () => {\n describe('Markup', () => {\n it('basic', () => {\n equal(expand('ul>.item$*2'), '
    \\n\\t
  • \\n\\t
  • \\n
');\n\n // insert text into abbreviation\n equal(expand('ul>.item$*', { text: ['foo', 'bar'] }), '
    \\n\\t
  • foo
  • \\n\\t
  • bar
  • \\n
');\n\n // insert TextMate-style fields/tabstops in output\n equal(expand('ul>.item$*2', {\n options: {\n 'output.field': (index, placeholder) => `\\${${index}${placeholder ? ':' + placeholder : ''}}`\n }\n }), '
    \\n\\t
  • ${1}
  • \\n\\t
  • ${2}
  • \\n
');\n });\n\n it('attributes', () => {\n const snippets = {\n test: 'test[!foo bar. baz={}]'\n };\n const opt = { snippets };\n const reverse = {\n options: { 'output.reverseAttributes': true },\n snippets\n };\n\n equal(expand('a.test'), '');\n equal(expand('a.test', reverse), '');\n\n equal(expand('test', opt), '');\n equal(expand('test[foo]', opt), '');\n equal(expand('test[baz=a foo=1]', opt), '');\n\n equal(expand('map'), '');\n equal(expand('map[]'), '');\n equal(expand('map[name=\"valid\"]'), '');\n equal(expand('map[href=\"invalid\"]'), '');\n\n // Apply attributes in reverse order\n equal(expand('test', reverse), '');\n equal(expand('test[foo]', reverse), '');\n equal(expand('test[baz=a foo=1]', reverse), '');\n });\n\n it('numbering', () => {\n equal(expand('ul>li.item$@-*5'), '
    \\n\\t
  • \\n\\t
  • \\n\\t
  • \\n\\t
  • \\n\\t
  • \\n
');\n });\n\n it('syntax', () => {\n equal(expand('ul>.item$*2', { syntax: 'html' }), '
    \\n\\t
  • \\n\\t
  • \\n
');\n equal(expand('ul>.item$*2', { syntax: 'slim' }), 'ul\\n\\tli.item1 \\n\\tli.item2 ');\n equal(expand('xsl:variable[name=a select=b]>div', { syntax: 'xsl' }), '\\n\\t
\\n
');\n });\n\n it('custom profile', () => {\n equal(expand('img'), '\"\"');\n equal(expand('img', { options: { 'output.selfClosingStyle': 'xhtml' } }), '\"\"');\n });\n\n it('custom variables', () => {\n const variables = { charset: 'ru-RU' };\n\n equal(expand('[charset=${charset}]{${charset}}'), '
UTF-8
');\n equal(expand('[charset=${charset}]{${charset}}', { variables }), '
ru-RU
');\n });\n\n it('custom snippets', () => {\n const snippets = {\n link: 'link[foo=bar href]/',\n foo: '.foo[bar=baz]',\n repeat: 'div>ul>li{Hello World}*3'\n };\n\n equal(expand('foo', { snippets }), '
');\n\n // `link:css` depends on `link` snippet so changing it will result in\n // altered `link:css` result\n equal(expand('link:css'), '');\n equal(expand('link:css', { snippets }), '');\n\n // https://github.com/emmetio/emmet/issues/468\n equal(expand('repeat', { snippets }), '
\\n\\t
    \\n\\t\\t
  • Hello World
  • \\n\\t\\t
  • Hello World
  • \\n\\t\\t
  • Hello World
  • \\n\\t
\\n
');\n });\n\n it('formatter options', () => {\n equal(expand('ul>.item$*2'), '
    \\n\\t
  • \\n\\t
  • \\n
');\n equal(expand('ul>.item$*2', { options: { 'comment.enabled': true } }),\n '
    \\n\\t
  • \\n\\t\\n\\t
  • \\n\\t\\n
');\n\n equal(expand('div>p'), '
\\n\\t

\\n
');\n equal(expand('div>p', { options: { 'output.formatLeafNode': true } }), '
\\n\\t

\\n\\t\\t\\n\\t

\\n
');\n });\n\n it('JSX', () => {\n const config = { syntax: 'jsx' };\n equal(expand('div#foo.bar', config), '
');\n equal(expand('label[for=a]', config), '');\n equal(expand('Foo.Bar', config), '');\n equal(expand('div.{theme.style}', config), '
');\n });\n\n it('override attributes', () => {\n const config = { syntax: 'jsx' };\n equal(expand('.bar', config), '
');\n equal(expand('..bar', config), '
');\n equal(expand('..foo-bar', config), '
');\n\n equal(expand('.foo', { syntax: 'vue' }), '
');\n equal(expand('..foo', { syntax: 'vue' }), '
');\n });\n\n it('wrap with abbreviation', () => {\n equal(expand('div>ul', { text: ['
line1
\\n
line2
'] }),\n '
\\n\\t
    \\n\\t\\t
    line1
    \\n\\t\\t
    line2
    \\n\\t
\\n
');\n equal(expand('p', { text: 'foo\\nbar' }), '

\\n\\tfoo\\n\\tbar\\n

');\n equal(expand('p', { text: '
foo
' }), '

\\n\\t

foo
\\n

');\n equal(expand('p', { text: 'foo' }), '

foo

');\n equal(expand('p', { text: 'foofoo' }), '

foofoo

');\n equal(expand('p', { text: 'foo
foo
' }), '

foo

foo

');\n });\n\n it('wrap with abbreviation href', () => {\n equal(expand('a', { text: ['www.google.it'] }), 'www.google.it');\n equal(expand('a', { text: ['then www.google.it'] }), 'then www.google.it');\n equal(expand('a', { text: ['www.google.it'], options: { 'markup.href': false } }), 'www.google.it');\n\n equal(expand('map[name=\"https://example.com\"]', { text: ['some text'] }),\n 'some text');\n equal(expand('map[href=\"https://example.com\"]', { text: ['some text'] }),\n 'some text');\n equal(expand('map[name=\"https://example.com\"]>b', { text: ['some text'] }),\n 'some text');\n\n equal(expand('a[href=\"https://example.com\"]>b', { text: ['some text false'], options: { 'markup.href': false } }),\n 'some text false');\n equal(expand('a[href=\"https://example.com\"]>b', { text: ['some text true'], options: { 'markup.href': true } }),\n 'some text true');\n equal(expand('a[href=\"https://example.com\"]>div', { text: ['

some text false

'], options: { 'markup.href': false } }),\n '\\n\\t
\\n\\t\\t

some text false

\\n\\t
\\n
');\n equal(expand('a[href=\"https://example.com\"]>div', { text: ['

some text true

'], options: { 'markup.href': true } }),\n '\\n\\t
\\n\\t\\t

some text true

\\n\\t
\\n
');\n });\n\n // it.only('debug', () => {\n // equal(expand('link:css'), '');\n // });\n });\n\n describe('Pug templates', () => {\n const config = resolveConfig({ syntax: 'pug' });\n it('basic', () => {\n equal(expand('!', config), 'doctype html\\nhtml(lang=\"en\")\\n\\thead\\n\\t\\tmeta(charset=\"UTF-8\")\\n\\t\\tmeta(http-equiv=\"X-UA-Compatible\", content=\"IE=edge\")\\n\\t\\tmeta(name=\"viewport\", content=\"width=device-width, initial-scale=1.0\")\\n\\t\\ttitle Document\\n\\tbody ');\n });\n });\n});\n\n Removed caching in markup abbreviation parser\n\nNaive implementation causes side-effects like incorrect repeater and inserted child content\n\n @@ -4,6 +4,7 @@ import expand, { resolveConfig } from '../src';\n describe('Expand Abbreviation', () => {\n describe('Markup', () => {\n it('basic', () => {\n+ equal(expand('input[value=\"text$\"]*2'), '');\n equal(expand('ul>.item$*2'), '
    \\n\\t
  • \\n\\t
  • \\n
');\n \n // insert text into abbreviation\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Removed caching in markup abbreviation parser"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".ts"},"lang":{"kind":"string","value":"ts"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"emmetio/emmet"}}},{"rowIdx":10675167,"cells":{"id":{"kind":"string","value":"10070817"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)\n\n> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires 
compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\nYou now have a Redis daemon running on port `6379`.\n\n### Setup\n\n```bash\ngem install split\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives; if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to mark the completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n 
ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. 
You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. 
In case you would like to start new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. 
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? 
},\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be 
called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. 
default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, 
and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. 
THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments\n```ruby\n ab_combined_test(:button_color_experiment)\n```\nFinish each combined test as normal\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```\n helpers Split::CombinedExperimentsHelper\n ```\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. 
This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated\nfetches for fairly static data. Enabling caching will reduce this load.\n\n```ruby\nSplit.configuration.cache = true\n```\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. 
The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n
by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\nYou now have a Redis daemon running on port `6379`.\n\n### Setup\n\n```bash\ngem install split\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. 
Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. 
You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### RSpec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. 
In case you would like to start new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. 
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\nend\n```\n\nBy default, cookies will expire in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\nend\n```\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? 
},\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be 
called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. 
default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, 
and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. 
THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments\n```ruby\n ab_combined_test(:button_color_experiment)\n```\nFinish each combined test as normal\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```\n helpers Split::CombinedExperimentsHelper\n ```\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. 
This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n```ruby\nSplit.configuration.cache = true\n```\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. 
The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n\n Merge pull request #634 from splitrb/add-documentation-on-cookie-storage\n\nAdding documentation related to what is stored on cookies.\n @@ -263,7 +263,7 @@ Split.configure do |config|\n end\n ```\n \n-By default, cookies will expire in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n+When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n \n ```ruby\n Split.configure do |config|\n@@ -272,6 +272,8 @@ Split.configure do |config|\n end\n ```\n \n+The data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n+\n __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n \n #### Redis\n"},"addition_count":{"kind":"number","value":3,"string":"3"},"commit_subject":{"kind":"string","value":"Merge pull request #634 from splitrb/add-documentation-on-cookie-storage"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675169,"cells":{"id":{"kind":"string","value":"10070819"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)\n\n> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired 
by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\nYou now have a Redis daemon running on port `6379`.\n\n### Setup\n\n```bash\ngem install split\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. 
Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. 
You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. 
In case you would like to start new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. 
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\nend\n```\n\nBy default, cookies will expire in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\nend\n```\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? 
},\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be 
called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. 
default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, 
and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. 
THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments\n```ruby\n ab_combined_test(:button_color_experiment)\n```\nFinish each combined test as normal\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```\n helpers Split::CombinedExperimentsHelper\n ```\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. 
This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n ```ruby\nSplit.configuration.cache = true\n````\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. 
The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n\n Merge pull request #634 from splitrb/add-documentation-on-cookie-storage\n\nAdding documentation related to what is stored on cookies.\n @@ -263,7 +263,7 @@ Split.configure do |config|\n end\n ```\n \n-By default, cookies will expire in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n+When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n \n ```ruby\n Split.configure do |config|\n@@ -272,6 +272,8 @@ Split.configure do |config|\n end\n ```\n \n+The data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n+\n __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n \n #### Redis\n"},"addition_count":{"kind":"number","value":3,"string":"3"},"commit_subject":{"kind":"string","value":"Merge pull request #634 from splitrb/add-documentation-on-cookie-storage"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675170,"cells":{"id":{"kind":"string","value":"10070820"},"text":{"kind":"string","value":" jquery.meow.js\n (function ($, window) {\n 'use strict';\n // Meow queue\n var default_meow_area,\n meows = {\n queue: {},\n add: function (meow) {\n this.queue[meow.timestamp] = meow;\n },\n get: function (timestamp) {\n return this.queue[timestamp];\n },\n remove: function (timestamp) {\n delete this.queue[timestamp];\n },\n size: function () {\n var timestamp,\n size = 0;\n for (timestamp in this.queue) {\n if (this.queue.hasOwnProperty(timestamp)) { size += 1; }\n }\n return size;\n }\n },\n // Meow constructor\n Meow = function (options) {\n var that = this;\n\n this.timestamp = new Date().getTime(); // used to identify this meow and timeout\n this.hovered = false; // whether mouse is over or not\n\n if (typeof default_meow_area === 'undefined'\n && typeof options.container === 'undefined') {\n default_meow_area = $(window.document.createElement('div'))\n .attr({'id': ((new Date()).getTime()), 'class': 'meows'});\n $('body').prepend(default_meow_area);\n }\n\n if (meows.size() <= 0) {\n if (typeof 
options.beforeCreateFirst === 'function') {\n options.beforeCreateFirst.call(that);\n }\n }\n\n if (typeof options.container === 'string') {\n this.container = $(options.container);\n } else {\n this.container = default_meow_area;\n }\n\n\n if (typeof options.title === 'string') {\n this.title = options.title;\n }\n\n if (typeof options.message === 'string') {\n this.message = options.message;\n } else if (options.message instanceof jQuery) {\n if (options.message.is('input,textarea,select')) {\n this.message = options.message.val();\n } else {\n this.message = options.message.text();\n }\n\n if (typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') {\n this.title = options.message.attr('title');\n }\n }\n\n if (typeof options.icon === 'string') {\n this.icon = options.icon;\n }\n if (options.sticky) {\n this.duration = Infinity;\n } else {\n this.duration = options.duration || 5000;\n }\n\n // Call callback if it's defined (this = meow object)\n if (typeof options.beforeCreate === 'function') {\n options.beforeCreate.call(that);\n }\n\n // Add the meow to the meow area\n this.container.append($(window.document.createElement('div'))\n .attr('id', 'meow-' + this.timestamp.toString())\n .addClass('meow')\n .html($(window.document.createElement('div')).addClass('inner').html(this.message))\n .hide()\n .fadeIn(400));\n\n this.manifest = $('#meow-' + this.timestamp.toString());\n\n // Add title if it's defined\n if (typeof this.title === 'string') {\n this.manifest.find('.inner').prepend(\n $(window.document.createElement('h1')).text(this.title)\n );\n }\n\n // Add icon if it's defined\n if (typeof that.icon === 'string') {\n this.manifest.find('.inner').prepend(\n $(window.document.createElement('div')).addClass('icon').html(\n $(window.document.createElement('img')).attr('src', this.icon)\n )\n );\n }\n\n // Add close button if the meow isn't uncloseable\n // TODO: this close button needs to be much prettier\n if (options.closeable 
!== false) {\n this.manifest.find('.inner').prepend(\n $(window.document.createElement('a'))\n .addClass('close')\n .html('&times;')\n .attr('href', '#close-meow-' + that.timestamp)\n .click(function (e) {\n e.preventDefault();\n that.destroy();\n })\n );\n }\n\n this.manifest.bind('mouseenter mouseleave', function (event) {\n if (event.type === 'mouseleave') {\n that.hovered = false;\n that.manifest.removeClass('hover');\n // Destroy the mow on mouseleave if it's timed out\n if (that.timestamp + that.duration <= new Date().getTime()) {\n that.destroy();\n }\n } else {\n that.hovered = true;\n that.manifest.addClass('hover');\n }\n });\n\n // Add a timeout if the duration isn't Infinity\n if (this.duration !== Infinity) {\n this.timeout = window.setTimeout(function () {\n // Make sure this meow hasn't already been destroyed\n if (typeof meows.get(that.timestamp) !== 'undefined') {\n // Call callback if it's defined (this = meow DOM element)\n if (typeof options.onTimeout === 'function') {\n options.onTimeout.call(that.manifest);\n }\n // Don't destroy if user is hovering over meow\n if (that.hovered !== true && typeof that === 'object') {\n that.destroy();\n }\n }\n }, that.duration);\n }\n\n this.destroy = function () {\n if (that.destroyed !== true) {\n // Call callback if it's defined (this = meow DOM element)\n if (typeof options.beforeDestroy === 'function') {\n options.beforeDestroy.call(that.manifest);\n }\n that.manifest.find('.inner').fadeTo(400, 0, function () {\n that.manifest.slideUp(function () {\n that.manifest.remove();\n that.destroyed = true;\n meows.remove(that.timestamp);\n if (typeof options.afterDestroy === 'function') {\n options.afterDestroy.call(null);\n }\n if (meows.size() <= 0) {\n if (default_meow_area instanceof $) {\n default_meow_area.remove();\n default_meow_area = undefined;\n }\n if (typeof options.afterDestroyLast === 'function') {\n options.afterDestroyLast.call(null);\n }\n }\n });\n });\n }\n };\n };\n\n $.fn.meow = function 
(args) {\n var meow = new Meow(args);\n meows.add(meow);\n return meow;\n };\n $.meow = $.fn.meow;\n}(jQuery, window));\n\n Consistently use my own passed-in $\n\n @@ -55,7 +55,7 @@\n \n if (typeof options.message === 'string') {\n this.message = options.message;\n- } else if (options.message instanceof jQuery) {\n+ } else if (options.message instanceof $) {\n if (options.message.is('input,textarea,select')) {\n this.message = options.message.val();\n } else {\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Consistently use my own passed-in $"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".js"},"lang":{"kind":"string","value":"meow"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"zacstewart/Meow"}}},{"rowIdx":10675171,"cells":{"id":{"kind":"string","value":"10070821"},"text":{"kind":"string","value":" jquery.meow.js\n (function ($, window) {\n 'use strict';\n // Meow queue\n var default_meow_area,\n meows = {\n queue: {},\n add: function (meow) {\n this.queue[meow.timestamp] = meow;\n },\n get: function (timestamp) {\n return this.queue[timestamp];\n },\n remove: function (timestamp) {\n delete this.queue[timestamp];\n },\n size: function () {\n var timestamp,\n size = 0;\n for (timestamp in this.queue) {\n if (this.queue.hasOwnProperty(timestamp)) { size += 1; }\n }\n return size;\n }\n },\n // Meow constructor\n Meow = function (options) {\n var that = this;\n\n this.timestamp = new Date().getTime(); // used to identify this meow and timeout\n this.hovered = false; // whether mouse is over or not\n\n if (typeof default_meow_area === 'undefined'\n && typeof options.container === 'undefined') {\n default_meow_area = $(window.document.createElement('div'))\n .attr({'id': ((new Date()).getTime()), 'class': 'meows'});\n $('body').prepend(default_meow_area);\n }\n\n if (meows.size() <= 0) {\n if (typeof 
options.beforeCreateFirst === 'function') {\n options.beforeCreateFirst.call(that);\n }\n }\n\n if (typeof options.container === 'string') {\n this.container = $(options.container);\n } else {\n this.container = default_meow_area;\n }\n\n\n if (typeof options.title === 'string') {\n this.title = options.title;\n }\n\n if (typeof options.message === 'string') {\n this.message = options.message;\n } else if (options.message instanceof jQuery) {\n if (options.message.is('input,textarea,select')) {\n this.message = options.message.val();\n } else {\n this.message = options.message.text();\n }\n\n if (typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') {\n this.title = options.message.attr('title');\n }\n }\n\n if (typeof options.icon === 'string') {\n this.icon = options.icon;\n }\n if (options.sticky) {\n this.duration = Infinity;\n } else {\n this.duration = options.duration || 5000;\n }\n\n // Call callback if it's defined (this = meow object)\n if (typeof options.beforeCreate === 'function') {\n options.beforeCreate.call(that);\n }\n\n // Add the meow to the meow area\n this.container.append($(window.document.createElement('div'))\n .attr('id', 'meow-' + this.timestamp.toString())\n .addClass('meow')\n .html($(window.document.createElement('div')).addClass('inner').html(this.message))\n .hide()\n .fadeIn(400));\n\n this.manifest = $('#meow-' + this.timestamp.toString());\n\n // Add title if it's defined\n if (typeof this.title === 'string') {\n this.manifest.find('.inner').prepend(\n $(window.document.createElement('h1')).text(this.title)\n );\n }\n\n // Add icon if it's defined\n if (typeof that.icon === 'string') {\n this.manifest.find('.inner').prepend(\n $(window.document.createElement('div')).addClass('icon').html(\n $(window.document.createElement('img')).attr('src', this.icon)\n )\n );\n }\n\n // Add close button if the meow isn't uncloseable\n // TODO: this close button needs to be much prettier\n if (options.closeable 
!== false) {\n this.manifest.find('.inner').prepend(\n $(window.document.createElement('a'))\n .addClass('close')\n .html('&times;')\n .attr('href', '#close-meow-' + that.timestamp)\n .click(function (e) {\n e.preventDefault();\n that.destroy();\n })\n );\n }\n\n this.manifest.bind('mouseenter mouseleave', function (event) {\n if (event.type === 'mouseleave') {\n that.hovered = false;\n that.manifest.removeClass('hover');\n // Destroy the mow on mouseleave if it's timed out\n if (that.timestamp + that.duration <= new Date().getTime()) {\n that.destroy();\n }\n } else {\n that.hovered = true;\n that.manifest.addClass('hover');\n }\n });\n\n // Add a timeout if the duration isn't Infinity\n if (this.duration !== Infinity) {\n this.timeout = window.setTimeout(function () {\n // Make sure this meow hasn't already been destroyed\n if (typeof meows.get(that.timestamp) !== 'undefined') {\n // Call callback if it's defined (this = meow DOM element)\n if (typeof options.onTimeout === 'function') {\n options.onTimeout.call(that.manifest);\n }\n // Don't destroy if user is hovering over meow\n if (that.hovered !== true && typeof that === 'object') {\n that.destroy();\n }\n }\n }, that.duration);\n }\n\n this.destroy = function () {\n if (that.destroyed !== true) {\n // Call callback if it's defined (this = meow DOM element)\n if (typeof options.beforeDestroy === 'function') {\n options.beforeDestroy.call(that.manifest);\n }\n that.manifest.find('.inner').fadeTo(400, 0, function () {\n that.manifest.slideUp(function () {\n that.manifest.remove();\n that.destroyed = true;\n meows.remove(that.timestamp);\n if (typeof options.afterDestroy === 'function') {\n options.afterDestroy.call(null);\n }\n if (meows.size() <= 0) {\n if (default_meow_area instanceof $) {\n default_meow_area.remove();\n default_meow_area = undefined;\n }\n if (typeof options.afterDestroyLast === 'function') {\n options.afterDestroyLast.call(null);\n }\n }\n });\n });\n }\n };\n };\n\n $.fn.meow = function 
(args) {\n var meow = new Meow(args);\n meows.add(meow);\n return meow;\n };\n $.meow = $.fn.meow;\n}(jQuery, window));\n\n Consistently use my own passed-in $\n\n @@ -55,7 +55,7 @@\n \n if (typeof options.message === 'string') {\n this.message = options.message;\n- } else if (options.message instanceof jQuery) {\n+ } else if (options.message instanceof $) {\n if (options.message.is('input,textarea,select')) {\n this.message = options.message.val();\n } else {\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Consistently use my own passed-in $"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".js"},"lang":{"kind":"string","value":"meow"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"zacstewart/Meow"}}},{"rowIdx":10675172,"cells":{"id":{"kind":"string","value":"10070822"},"text":{"kind":"string","value":" experiment_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"time\"\n\ndescribe Split::Experiment do\n def new_experiment(goals = [])\n Split::Experiment.new(\"link_color\", alternatives: [\"blue\", \"red\", \"green\"], goals: goals)\n end\n\n def alternative(color)\n Split::Alternative.new(color, \"link_color\")\n end\n\n let(:experiment) { new_experiment }\n\n let(:blue) { alternative(\"blue\") }\n let(:green) { alternative(\"green\") }\n\n context \"with an experiment\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"]) }\n\n it \"should have a name\" do\n expect(experiment.name).to eq(\"basket_text\")\n end\n\n it \"should have alternatives\" do\n expect(experiment.alternatives.length).to be 2\n end\n\n it \"should have alternatives with correct names\" do\n expect(experiment.alternatives.collect { |a| a.name }).to eq([\"Basket\", \"Cart\"])\n end\n\n it \"should be resettable by default\" do\n expect(experiment.resettable).to be_truthy\n 
end\n\n it \"should save to redis\" do\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n end\n\n it \"should save the start time to redis\" do\n experiment_start_time = Time.at(1372167761)\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to eq(experiment_start_time)\n end\n\n it \"should not save the start time to redis when start_manually is enabled\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should save the selected algorithm to redis\" do\n experiment_algorithm = Split::Algorithms::Whiplash\n experiment.algorithm = experiment_algorithm\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").algorithm).to eq(experiment_algorithm)\n end\n\n it \"should handle having a start time stored as a string\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to receive(:now).twice.and_return(experiment_start_time)\n experiment.save\n Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to eq(experiment_start_time)\n end\n\n it \"should handle not having a start time\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n Split.redis.hdel(:experiment_start_times, experiment.name)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should not create duplicates when saving multiple times\" do\n experiment.save\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n expect(Split.redis.lrange(\"basket_text\", 0, -1)).to eq(['{\"Basket\":1}', '{\"Cart\":1}'])\n end\n\n describe \"new 
record?\" do\n it \"should know if it hasn't been saved yet\" do\n expect(experiment.new_record?).to be_truthy\n end\n\n it \"should know if it has been saved yet\" do\n experiment.save\n expect(experiment.new_record?).to be_falsey\n end\n end\n\n describe \"control\" do\n it \"should be the first alternative\" do\n experiment.save\n expect(experiment.control.name).to eq(\"Basket\")\n end\n end\n end\n\n describe \"initialization\" do\n it \"should set the algorithm when passed as an option to the initializer\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should be possible to make an experiment not resettable\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n expect(experiment.resettable).to be_falsey\n end\n\n context \"from configuration\" do\n let(:experiment_name) { :my_experiment }\n let(:experiments) do\n {\n experiment_name => {\n alternatives: [\"Control Opt\", \"Alt one\"]\n }\n }\n end\n\n before { Split.configuration.experiments = experiments }\n\n it \"assigns default values to the experiment\" do\n expect(Split::Experiment.new(experiment_name).resettable).to eq(true)\n end\n end\n end\n\n describe \"persistent configuration\" do\n it \"should persist resettable in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.resettable).to be_falsey\n end\n\n describe \"#metadata\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash, metadata: meta) }\n let(:meta) { { a: \"b\" } }\n\n before do\n experiment.save\n end\n\n it \"should delete the key when 
metadata is removed\" do\n experiment.metadata = nil\n experiment.save\n\n expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey\n end\n\n context \"simple hash\" do\n let(:meta) { { \"basket\" => \"a\", \"cart\" => \"b\" } }\n\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n\n context \"nested hash\" do\n let(:meta) { { \"basket\" => { \"one\" => \"two\" }, \"cart\" => \"b\" } }\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n end\n\n it \"should persist algorithm in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should persist a new experiment in redis, that does not exist in the configuration file\" do\n experiment = Split::Experiment.new(\"foobar\", alternatives: [\"tra\", \"la\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"foobar\")\n expect(e).to eq(experiment)\n expect(e.alternatives.collect { |a| a.name }).to eq([\"tra\", \"la\"])\n end\n end\n\n describe \"deleting\" do\n it \"should delete itself\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [ \"Basket\", \"Cart\"])\n experiment.save\n\n experiment.delete\n expect(Split.redis.exists?(\"link_color\")).to be false\n expect(Split::ExperimentCatalog.find(\"link_color\")).to be_nil\n end\n\n it \"should increment the version\" do\n expect(experiment.version).to eq(0)\n experiment.delete\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_delete hook\" do\n 
expect(Split.configuration.on_experiment_delete).to receive(:call)\n experiment.delete\n end\n end\n \n it \"should use the specified algorithm if a winner does not exist\" do\n Split.configuration.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color'))\n experiment.next_alternative.name.should eql('green')\n end\n end\n experiment.start\n experiment.delete\n expect(experiment.start_time).to be_nil\n end\n\n it \"should default cohorting back to false\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq(true)\n experiment.delete\n expect(experiment.cohorting_disabled?).to eq(false)\n end\n end\n\n describe \"winner\" do\n it \"should have no winner initially\" do\n expect(experiment.winner).to be_nil\n end\n end\n\n describe \"winner=\" do\n it \"should allow you to specify a winner\" do\n experiment.save\n experiment.winner = \"red\"\n expect(experiment.winner.name).to eq(\"red\")\n end\n\n it \"should call the on_experiment_winner_choose hook\" do\n expect(Split.configuration.on_experiment_winner_choose).to receive(:call)\n experiment.winner = \"green\"\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to_not have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.winner = \"red\"\n expect(experiment).to have_winner\n end\n end\n end\n\n describe \"reset_winner\" do\n before { experiment.winner = \"green\" }\n\n it \"should reset the winner\" do\n experiment.reset_winner\n expect(experiment.winner).to be_nil\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.reset_winner\n expect(experiment).to_not have_winner\n end\n end\n end\n\n describe \"has_winner?\" do\n context \"with winner\" do\n before { experiment.winner = \"red\" }\n\n it \"returns true\" do\n expect(experiment).to have_winner\n end\n 
end\n\n context \"without winner\" do\n it \"returns false\" do\n expect(experiment).to_not have_winner\n end\n end\n\n it \"memoizes has_winner state\" do\n expect(experiment).to receive(:winner).once\n expect(experiment).to_not have_winner\n expect(experiment).to_not have_winner\n end\n end\n\n describe \"reset\" do\n let(:reset_manually) { false }\n\n before do\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n experiment.save\n green.increment_participation\n green.increment_participation\n end\n\n it \"should reset all alternatives\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n it \"should reset the winner\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(experiment.winner).to be_nil\n end\n\n it \"should increment the version\" do\n expect(experiment.version).to eq(0)\n experiment.reset\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_reset hook\" do\n expect(Split.configuration.on_experiment_reset).to receive(:call)\n experiment.reset\n end\n\n it \"should call the on_before_experiment_reset hook\" do\n expect(Split.configuration.on_before_experiment_reset).to receive(:call)\n experiment.reset\n end\n end\n\n describe \"algorithm\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n it \"should use the default algorithm if none is specified\" do\n expect(experiment.algorithm).to eq(Split.configuration.algorithm)\n end\n\n it \"should use the user specified algorithm for this experiment if specified\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to 
eq(Split::Algorithms::Whiplash)\n end\n end\n\n describe \"#next_alternative\" do\n context \"with multiple alternatives\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n context \"with winner\" do\n it \"should always return the winner\" do\n green = Split::Alternative.new(\"green\", \"link_color\")\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n\n context \"without winner\" do\n it \"should use the specified algorithm\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new(\"green\", \"link_color\"))\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n end\n\n context \"with single alternative\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\") }\n\n it \"should always return the only alternative\" do\n expect(experiment.next_alternative.name).to eq(\"blue\")\n expect(experiment.next_alternative.name).to eq(\"blue\")\n end\n end\n end\n\n describe \"#cohorting_disabled?\" do\n it \"returns false when nothing has been configured\" do\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns true when enable_cohorting is performed\" do\n experiment.enable_cohorting\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns false when nothing has been configured\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq true\n end\n end\n\n describe \"changing an existing experiment\" do\n def same_but_different_alternative\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"yellow\", \"orange\")\n end\n\n it \"should reset an experiment if it is loaded with different alternatives\" do\n experiment.save\n 
blue.participant_count = 5\n same_experiment = same_but_different_alternative\n expect(same_experiment.alternatives.map(&:name)).to eq([\"blue\", \"yellow\", \"orange\"])\n expect(blue.participant_count).to eq(0)\n end\n\n it \"should only reset once\" do\n experiment.save\n expect(experiment.version).to eq(0)\n same_experiment = same_but_different_alternative\n expect(same_experiment.version).to eq(1)\n same_experiment_again = same_but_different_alternative\n expect(same_experiment_again.version).to eq(1)\n end\n\n context \"when metadata is changed\" do\n it \"should increase version\" do\n experiment.save\n experiment.metadata = { \"foo\" => \"bar\" }\n\n expect { experiment.save }.to change { experiment.version }.by(1)\n end\n\n it \"does not increase version\" do\n experiment.metadata = nil\n experiment.save\n expect { experiment.save }.to change { experiment.version }.by(0)\n end\n end\n\n context \"when experiment configuration is changed\" do\n let(:reset_manually) { false }\n\n before do\n experiment.save\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n green.increment_participation\n green.increment_participation\n experiment.set_alternatives_and_options(alternatives: %w(blue red green zip),\n goals: %w(purchase))\n experiment.save\n end\n\n it \"resets all alternatives\" do\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n context \"when reset_manually is set\" do\n let(:reset_manually) { true }\n\n it \"does not reset alternatives\" do\n expect(green.participant_count).to eq(2)\n expect(green.completed_count).to eq(0)\n end\n end\n end\n end\n\n describe \"alternatives passed as non-strings\" do\n it \"should throw an exception if an alternative is passed that is not a string\" do\n expect { Split::ExperimentCatalog.find_or_create(\"link_color\", :blue, :red) }.to raise_error(ArgumentError)\n expect { Split::ExperimentCatalog.find_or_create(\"link_enabled\", true, false) 
}.to raise_error(ArgumentError)\n end\n end\n\n describe \"specifying weights\" do\n let(:experiment_with_weight) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", { \"blue\" => 1 }, { \"red\" => 2 })\n }\n\n it \"should work for a new experiment\" do\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n\n it \"should work for an existing experiment\" do\n experiment.save\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n end\n\n describe \"specifying goals\" do\n let(:experiment) {\n new_experiment([\"purchase\"])\n }\n\n context \"saving experiment\" do\n let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\") }\n\n before { experiment.save }\n\n it \"can find existing experiment\" do\n expect(Split::ExperimentCatalog.find(\"link_color\").name).to eq(\"link_color\")\n end\n\n it \"should reset an experiment if it is loaded with different goals\" do\n same_but_different_goals\n expect(Split::ExperimentCatalog.find(\"link_color\").goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n it \"should have goals\" do\n expect(experiment.goals).to eq([\"purchase\"])\n end\n\n context \"find or create experiment\" do\n it \"should have correct goals\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n experiment = Split::ExperimentCatalog.find_or_create(\"link_color3\", \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([])\n end\n end\n end\n\n describe \"beta probability calculation\" do\n it \"should return a hash with the probability of each alternative being the best\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"mathematicians\", \"bernoulli\", \"poisson\", \"lagrange\")\n experiment.calc_winning_alternatives\n 
expect(experiment.alternative_probabilities).not_to be_nil\n end\n\n it \"should return between 46% and 54% probability for an experiment with 2 alternatives and no data\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"scientists\", \"einstein\", \"bohr\")\n experiment.calc_winning_alternatives\n expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50)\n end\n\n it \"should calculate the probability of being the winning alternative separately for each goal\", skip: true do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n goal1 = experiment.goals[0]\n goal2 = experiment.goals[1]\n experiment.alternatives.each do |alternative|\n alternative.participant_count = 50\n alternative.set_completed_count(10, goal1)\n alternative.set_completed_count(15+rand(30), goal2)\n end\n experiment.calc_winning_alternatives\n alt = experiment.alternatives[0]\n p_goal1 = alt.p_winner(goal1)\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n\n it \"should return nil and not re-calculate probabilities if they have already been calculated today\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.calc_winning_alternatives).not_to be nil\n expect(experiment.calc_winning_alternatives).to be nil\n end\n end\nend\n\n Akriti/Sumedha - Now random_alternative method in experiment class uses the specified algorithm and not the default one.\n\n @@ -225,7 +225,8 @@ describe Split::Experiment do\n end\n \n it \"should use the specified algorithm if a winner does not exist\" do\n- Split.configuration.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color'))\n+ experiment.algorithm = Split::Algorithms::Whiplash\n+ 
experiment.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color'))\n experiment.next_alternative.name.should eql('green')\n end\n end\n"},"addition_count":{"kind":"number","value":2,"string":"2"},"commit_subject":{"kind":"string","value":"Akriti/Sumedha - Now random_alternative method in experiment class uses the specified algorithm and not the default one."},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675173,"cells":{"id":{"kind":"string","value":"10070823"},"text":{"kind":"string","value":" experiment_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"time\"\n\ndescribe Split::Experiment do\n def new_experiment(goals = [])\n Split::Experiment.new(\"link_color\", alternatives: [\"blue\", \"red\", \"green\"], goals: goals)\n end\n\n def alternative(color)\n Split::Alternative.new(color, \"link_color\")\n end\n\n let(:experiment) { new_experiment }\n\n let(:blue) { alternative(\"blue\") }\n let(:green) { alternative(\"green\") }\n\n context \"with an experiment\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"]) }\n\n it \"should have a name\" do\n expect(experiment.name).to eq(\"basket_text\")\n end\n\n it \"should have alternatives\" do\n expect(experiment.alternatives.length).to be 2\n end\n\n it \"should have alternatives with correct names\" do\n expect(experiment.alternatives.collect { |a| a.name }).to eq([\"Basket\", \"Cart\"])\n end\n\n it \"should be resettable by default\" do\n expect(experiment.resettable).to be_truthy\n end\n\n it \"should save to redis\" do\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n end\n\n it \"should save the start time to redis\" do\n experiment_start_time = 
Time.at(1372167761)\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to eq(experiment_start_time)\n end\n\n it \"should not save the start time to redis when start_manually is enabled\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should save the selected algorithm to redis\" do\n experiment_algorithm = Split::Algorithms::Whiplash\n experiment.algorithm = experiment_algorithm\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").algorithm).to eq(experiment_algorithm)\n end\n\n it \"should handle having a start time stored as a string\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to receive(:now).twice.and_return(experiment_start_time)\n experiment.save\n Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to eq(experiment_start_time)\n end\n\n it \"should handle not having a start time\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n Split.redis.hdel(:experiment_start_times, experiment.name)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should not create duplicates when saving multiple times\" do\n experiment.save\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n expect(Split.redis.lrange(\"basket_text\", 0, -1)).to eq(['{\"Basket\":1}', '{\"Cart\":1}'])\n end\n\n describe \"new record?\" do\n it \"should know if it hasn't been saved yet\" do\n expect(experiment.new_record?).to be_truthy\n end\n\n it \"should know if it has been saved yet\" do\n experiment.save\n 
expect(experiment.new_record?).to be_falsey\n end\n end\n\n describe \"control\" do\n it \"should be the first alternative\" do\n experiment.save\n expect(experiment.control.name).to eq(\"Basket\")\n end\n end\n end\n\n describe \"initialization\" do\n it \"should set the algorithm when passed as an option to the initializer\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should be possible to make an experiment not resettable\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n expect(experiment.resettable).to be_falsey\n end\n\n context \"from configuration\" do\n let(:experiment_name) { :my_experiment }\n let(:experiments) do\n {\n experiment_name => {\n alternatives: [\"Control Opt\", \"Alt one\"]\n }\n }\n end\n\n before { Split.configuration.experiments = experiments }\n\n it \"assigns default values to the experiment\" do\n expect(Split::Experiment.new(experiment_name).resettable).to eq(true)\n end\n end\n end\n\n describe \"persistent configuration\" do\n it \"should persist resettable in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.resettable).to be_falsey\n end\n\n describe \"#metadata\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash, metadata: meta) }\n let(:meta) { { a: \"b\" } }\n\n before do\n experiment.save\n end\n\n it \"should delete the key when metadata is removed\" do\n experiment.metadata = nil\n experiment.save\n\n expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey\n end\n\n context \"simple hash\" do\n let(:meta) { { 
\"basket\" => \"a\", \"cart\" => \"b\" } }\n\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n\n context \"nested hash\" do\n let(:meta) { { \"basket\" => { \"one\" => \"two\" }, \"cart\" => \"b\" } }\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n end\n\n it \"should persist algorithm in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should persist a new experiment in redis, that does not exist in the configuration file\" do\n experiment = Split::Experiment.new(\"foobar\", alternatives: [\"tra\", \"la\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"foobar\")\n expect(e).to eq(experiment)\n expect(e.alternatives.collect { |a| a.name }).to eq([\"tra\", \"la\"])\n end\n end\n\n describe \"deleting\" do\n it \"should delete itself\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [ \"Basket\", \"Cart\"])\n experiment.save\n\n experiment.delete\n expect(Split.redis.exists?(\"link_color\")).to be false\n expect(Split::ExperimentCatalog.find(\"link_color\")).to be_nil\n end\n\n it \"should increment the version\" do\n expect(experiment.version).to eq(0)\n experiment.delete\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_delete hook\" do\n expect(Split.configuration.on_experiment_delete).to receive(:call)\n experiment.delete\n end\n end\n \n it \"should use the specified algorithm if a winner does not exist\" do\n 
Split.configuration.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color'))\n experiment.next_alternative.name.should eql('green')\n end\n end\n experiment.start\n experiment.delete\n expect(experiment.start_time).to be_nil\n end\n\n it \"should default cohorting back to false\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq(true)\n experiment.delete\n expect(experiment.cohorting_disabled?).to eq(false)\n end\n end\n\n describe \"winner\" do\n it \"should have no winner initially\" do\n expect(experiment.winner).to be_nil\n end\n end\n\n describe \"winner=\" do\n it \"should allow you to specify a winner\" do\n experiment.save\n experiment.winner = \"red\"\n expect(experiment.winner.name).to eq(\"red\")\n end\n\n it \"should call the on_experiment_winner_choose hook\" do\n expect(Split.configuration.on_experiment_winner_choose).to receive(:call)\n experiment.winner = \"green\"\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to_not have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.winner = \"red\"\n expect(experiment).to have_winner\n end\n end\n end\n\n describe \"reset_winner\" do\n before { experiment.winner = \"green\" }\n\n it \"should reset the winner\" do\n experiment.reset_winner\n expect(experiment.winner).to be_nil\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.reset_winner\n expect(experiment).to_not have_winner\n end\n end\n end\n\n describe \"has_winner?\" do\n context \"with winner\" do\n before { experiment.winner = \"red\" }\n\n it \"returns true\" do\n expect(experiment).to have_winner\n end\n end\n\n context \"without winner\" do\n it \"returns false\" do\n expect(experiment).to_not have_winner\n end\n end\n\n it \"memoizes has_winner state\" do\n expect(experiment).to 
receive(:winner).once\n expect(experiment).to_not have_winner\n expect(experiment).to_not have_winner\n end\n end\n\n describe \"reset\" do\n let(:reset_manually) { false }\n\n before do\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n experiment.save\n green.increment_participation\n green.increment_participation\n end\n\n it \"should reset all alternatives\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n it \"should reset the winner\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(experiment.winner).to be_nil\n end\n\n it \"should increment the version\" do\n expect(experiment.version).to eq(0)\n experiment.reset\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_reset hook\" do\n expect(Split.configuration.on_experiment_reset).to receive(:call)\n experiment.reset\n end\n\n it \"should call the on_before_experiment_reset hook\" do\n expect(Split.configuration.on_before_experiment_reset).to receive(:call)\n experiment.reset\n end\n end\n\n describe \"algorithm\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n it \"should use the default algorithm if none is specified\" do\n expect(experiment.algorithm).to eq(Split.configuration.algorithm)\n end\n\n it \"should use the user specified algorithm for this experiment if specified\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n end\n\n describe \"#next_alternative\" do\n context \"with multiple alternatives\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", 
\"blue\", \"red\", \"green\") }\n\n context \"with winner\" do\n it \"should always return the winner\" do\n green = Split::Alternative.new(\"green\", \"link_color\")\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n\n context \"without winner\" do\n it \"should use the specified algorithm\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new(\"green\", \"link_color\"))\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n end\n\n context \"with single alternative\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\") }\n\n it \"should always return the only alternative\" do\n expect(experiment.next_alternative.name).to eq(\"blue\")\n expect(experiment.next_alternative.name).to eq(\"blue\")\n end\n end\n end\n\n describe \"#cohorting_disabled?\" do\n it \"returns false when nothing has been configured\" do\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns true when enable_cohorting is performed\" do\n experiment.enable_cohorting\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns false when nothing has been configured\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq true\n end\n end\n\n describe \"changing an existing experiment\" do\n def same_but_different_alternative\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"yellow\", \"orange\")\n end\n\n it \"should reset an experiment if it is loaded with different alternatives\" do\n experiment.save\n blue.participant_count = 5\n same_experiment = same_but_different_alternative\n expect(same_experiment.alternatives.map(&:name)).to eq([\"blue\", \"yellow\", \"orange\"])\n expect(blue.participant_count).to 
eq(0)\n end\n\n it \"should only reset once\" do\n experiment.save\n expect(experiment.version).to eq(0)\n same_experiment = same_but_different_alternative\n expect(same_experiment.version).to eq(1)\n same_experiment_again = same_but_different_alternative\n expect(same_experiment_again.version).to eq(1)\n end\n\n context \"when metadata is changed\" do\n it \"should increase version\" do\n experiment.save\n experiment.metadata = { \"foo\" => \"bar\" }\n\n expect { experiment.save }.to change { experiment.version }.by(1)\n end\n\n it \"does not increase version\" do\n experiment.metadata = nil\n experiment.save\n expect { experiment.save }.to change { experiment.version }.by(0)\n end\n end\n\n context \"when experiment configuration is changed\" do\n let(:reset_manually) { false }\n\n before do\n experiment.save\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n green.increment_participation\n green.increment_participation\n experiment.set_alternatives_and_options(alternatives: %w(blue red green zip),\n goals: %w(purchase))\n experiment.save\n end\n\n it \"resets all alternatives\" do\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n context \"when reset_manually is set\" do\n let(:reset_manually) { true }\n\n it \"does not reset alternatives\" do\n expect(green.participant_count).to eq(2)\n expect(green.completed_count).to eq(0)\n end\n end\n end\n end\n\n describe \"alternatives passed as non-strings\" do\n it \"should throw an exception if an alternative is passed that is not a string\" do\n expect { Split::ExperimentCatalog.find_or_create(\"link_color\", :blue, :red) }.to raise_error(ArgumentError)\n expect { Split::ExperimentCatalog.find_or_create(\"link_enabled\", true, false) }.to raise_error(ArgumentError)\n end\n end\n\n describe \"specifying weights\" do\n let(:experiment_with_weight) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", { \"blue\" => 1 }, { \"red\" => 2 
})\n }\n\n it \"should work for a new experiment\" do\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n\n it \"should work for an existing experiment\" do\n experiment.save\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n end\n\n describe \"specifying goals\" do\n let(:experiment) {\n new_experiment([\"purchase\"])\n }\n\n context \"saving experiment\" do\n let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\") }\n\n before { experiment.save }\n\n it \"can find existing experiment\" do\n expect(Split::ExperimentCatalog.find(\"link_color\").name).to eq(\"link_color\")\n end\n\n it \"should reset an experiment if it is loaded with different goals\" do\n same_but_different_goals\n expect(Split::ExperimentCatalog.find(\"link_color\").goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n it \"should have goals\" do\n expect(experiment.goals).to eq([\"purchase\"])\n end\n\n context \"find or create experiment\" do\n it \"should have correct goals\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n experiment = Split::ExperimentCatalog.find_or_create(\"link_color3\", \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([])\n end\n end\n end\n\n describe \"beta probability calculation\" do\n it \"should return a hash with the probability of each alternative being the best\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"mathematicians\", \"bernoulli\", \"poisson\", \"lagrange\")\n experiment.calc_winning_alternatives\n expect(experiment.alternative_probabilities).not_to be_nil\n end\n\n it \"should return between 46% and 54% probability for an experiment with 2 alternatives and no data\" do\n experiment = 
Split::ExperimentCatalog.find_or_create(\"scientists\", \"einstein\", \"bohr\")\n experiment.calc_winning_alternatives\n expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50)\n end\n\n it \"should calculate the probability of being the winning alternative separately for each goal\", skip: true do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n goal1 = experiment.goals[0]\n goal2 = experiment.goals[1]\n experiment.alternatives.each do |alternative|\n alternative.participant_count = 50\n alternative.set_completed_count(10, goal1)\n alternative.set_completed_count(15+rand(30), goal2)\n end\n experiment.calc_winning_alternatives\n alt = experiment.alternatives[0]\n p_goal1 = alt.p_winner(goal1)\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n\n it \"should return nil and not re-calculate probabilities if they have already been calculated today\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.calc_winning_alternatives).not_to be nil\n expect(experiment.calc_winning_alternatives).to be nil\n end\n end\nend\n\n Akriti/Sumedha - Now random_alternative method in experiment class uses the specified algorithm and not the default one.\n\n @@ -225,7 +225,8 @@ describe Split::Experiment do\n end\n \n it \"should use the specified algorithm if a winner does not exist\" do\n- Split.configuration.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color'))\n+ experiment.algorithm = Split::Algorithms::Whiplash\n+ experiment.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color'))\n experiment.next_alternative.name.should eql('green')\n end\n 
end\n"},"addition_count":{"kind":"number","value":2,"string":"2"},"commit_subject":{"kind":"string","value":"Akriti/Sumedha - Now random_alternative method in experiment class uses the specified algorithm and not the default one."},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675174,"cells":{"id":{"kind":"string","value":"10070824"},"text":{"kind":"string","value":" experiment_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"time\"\n\ndescribe Split::Experiment do\n def new_experiment(goals = [])\n Split::Experiment.new(\"link_color\", alternatives: [\"blue\", \"red\", \"green\"], goals: goals)\n end\n\n def alternative(color)\n Split::Alternative.new(color, \"link_color\")\n end\n\n let(:experiment) { new_experiment }\n\n let(:blue) { alternative(\"blue\") }\n let(:green) { alternative(\"green\") }\n\n context \"with an experiment\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"]) }\n\n it \"should have a name\" do\n expect(experiment.name).to eq(\"basket_text\")\n end\n\n it \"should have alternatives\" do\n expect(experiment.alternatives.length).to be 2\n end\n\n it \"should have alternatives with correct names\" do\n expect(experiment.alternatives.collect { |a| a.name }).to eq([\"Basket\", \"Cart\"])\n end\n\n it \"should be resettable by default\" do\n expect(experiment.resettable).to be_truthy\n end\n\n it \"should save to redis\" do\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n end\n\n it \"should save the start time to redis\" do\n experiment_start_time = Time.at(1372167761)\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to 
eq(experiment_start_time)\n end\n\n it \"should not save the start time to redis when start_manually is enabled\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should save the selected algorithm to redis\" do\n experiment_algorithm = Split::Algorithms::Whiplash\n experiment.algorithm = experiment_algorithm\n experiment.save\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").algorithm).to eq(experiment_algorithm)\n end\n\n it \"should handle having a start time stored as a string\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to receive(:now).twice.and_return(experiment_start_time)\n experiment.save\n Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to eq(experiment_start_time)\n end\n\n it \"should handle not having a start time\" do\n experiment_start_time = Time.parse(\"Sat Mar 03 14:01:03\")\n expect(Time).to receive(:now).and_return(experiment_start_time)\n experiment.save\n\n Split.redis.hdel(:experiment_start_times, experiment.name)\n\n expect(Split::ExperimentCatalog.find(\"basket_text\").start_time).to be_nil\n end\n\n it \"should not create duplicates when saving multiple times\" do\n experiment.save\n experiment.save\n expect(Split.redis.exists?(\"basket_text\")).to be true\n expect(Split.redis.lrange(\"basket_text\", 0, -1)).to eq(['{\"Basket\":1}', '{\"Cart\":1}'])\n end\n\n describe \"new record?\" do\n it \"should know if it hasn't been saved yet\" do\n expect(experiment.new_record?).to be_truthy\n end\n\n it \"should know if it has been saved yet\" do\n experiment.save\n expect(experiment.new_record?).to be_falsey\n end\n end\n\n describe \"control\" do\n it \"should be the first alternative\" do\n experiment.save\n expect(experiment.control.name).to 
eq(\"Basket\")\n end\n end\n end\n\n describe \"initialization\" do\n it \"should set the algorithm when passed as an option to the initializer\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should be possible to make an experiment not resettable\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n expect(experiment.resettable).to be_falsey\n end\n\n context \"from configuration\" do\n let(:experiment_name) { :my_experiment }\n let(:experiments) do\n {\n experiment_name => {\n alternatives: [\"Control Opt\", \"Alt one\"]\n }\n }\n end\n\n before { Split.configuration.experiments = experiments }\n\n it \"assigns default values to the experiment\" do\n expect(Split::Experiment.new(experiment_name).resettable).to eq(true)\n end\n end\n end\n\n describe \"persistent configuration\" do\n it \"should persist resettable in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], resettable: false)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.resettable).to be_falsey\n end\n\n describe \"#metadata\" do\n let(:experiment) { Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash, metadata: meta) }\n let(:meta) { { a: \"b\" } }\n\n before do\n experiment.save\n end\n\n it \"should delete the key when metadata is removed\" do\n experiment.metadata = nil\n experiment.save\n\n expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey\n end\n\n context \"simple hash\" do\n let(:meta) { { \"basket\" => \"a\", \"cart\" => \"b\" } }\n\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n 
expect(e.metadata).to eq(meta)\n end\n end\n\n context \"nested hash\" do\n let(:meta) { { \"basket\" => { \"one\" => \"two\" }, \"cart\" => \"b\" } }\n it \"should persist metadata in redis\" do\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.metadata).to eq(meta)\n end\n end\n end\n\n it \"should persist algorithm in redis\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [\"Basket\", \"Cart\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"basket_text\")\n expect(e).to eq(experiment)\n expect(e.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n\n it \"should persist a new experiment in redis, that does not exist in the configuration file\" do\n experiment = Split::Experiment.new(\"foobar\", alternatives: [\"tra\", \"la\"], algorithm: Split::Algorithms::Whiplash)\n experiment.save\n\n e = Split::ExperimentCatalog.find(\"foobar\")\n expect(e).to eq(experiment)\n expect(e.alternatives.collect { |a| a.name }).to eq([\"tra\", \"la\"])\n end\n end\n\n describe \"deleting\" do\n it \"should delete itself\" do\n experiment = Split::Experiment.new(\"basket_text\", alternatives: [ \"Basket\", \"Cart\"])\n experiment.save\n\n experiment.delete\n expect(Split.redis.exists?(\"link_color\")).to be false\n expect(Split::ExperimentCatalog.find(\"link_color\")).to be_nil\n end\n\n it \"should increment the version\" do\n expect(experiment.version).to eq(0)\n experiment.delete\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_delete hook\" do\n expect(Split.configuration.on_experiment_delete).to receive(:call)\n experiment.delete\n end\n end\n \n it \"should use the specified algorithm if a winner does not exist\" do\n Split.configuration.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color'))\n experiment.next_alternative.name.should eql('green')\n end\n end\n 
experiment.start\n experiment.delete\n expect(experiment.start_time).to be_nil\n end\n\n it \"should default cohorting back to false\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq(true)\n experiment.delete\n expect(experiment.cohorting_disabled?).to eq(false)\n end\n end\n\n describe \"winner\" do\n it \"should have no winner initially\" do\n expect(experiment.winner).to be_nil\n end\n end\n\n describe \"winner=\" do\n it \"should allow you to specify a winner\" do\n experiment.save\n experiment.winner = \"red\"\n expect(experiment.winner.name).to eq(\"red\")\n end\n\n it \"should call the on_experiment_winner_choose hook\" do\n expect(Split.configuration.on_experiment_winner_choose).to receive(:call)\n experiment.winner = \"green\"\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to_not have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.winner = \"red\"\n expect(experiment).to have_winner\n end\n end\n end\n\n describe \"reset_winner\" do\n before { experiment.winner = \"green\" }\n\n it \"should reset the winner\" do\n experiment.reset_winner\n expect(experiment.winner).to be_nil\n end\n\n context \"when has_winner state is memoized\" do\n before { expect(experiment).to have_winner }\n\n it \"should keep has_winner state consistent\" do\n experiment.reset_winner\n expect(experiment).to_not have_winner\n end\n end\n end\n\n describe \"has_winner?\" do\n context \"with winner\" do\n before { experiment.winner = \"red\" }\n\n it \"returns true\" do\n expect(experiment).to have_winner\n end\n end\n\n context \"without winner\" do\n it \"returns false\" do\n expect(experiment).to_not have_winner\n end\n end\n\n it \"memoizes has_winner state\" do\n expect(experiment).to receive(:winner).once\n expect(experiment).to_not have_winner\n expect(experiment).to_not have_winner\n end\n end\n\n describe \"reset\" do\n let(:reset_manually) { false }\n\n before do\n 
allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n experiment.save\n green.increment_participation\n green.increment_participation\n end\n\n it \"should reset all alternatives\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n it \"should reset the winner\" do\n experiment.winner = \"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n experiment.reset\n\n expect(experiment.winner).to be_nil\n end\n\n it \"should increment the version\" do\n expect(experiment.version).to eq(0)\n experiment.reset\n expect(experiment.version).to eq(1)\n end\n\n it \"should call the on_experiment_reset hook\" do\n expect(Split.configuration.on_experiment_reset).to receive(:call)\n experiment.reset\n end\n\n it \"should call the on_before_experiment_reset hook\" do\n expect(Split.configuration.on_before_experiment_reset).to receive(:call)\n experiment.reset\n end\n end\n\n describe \"algorithm\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n it \"should use the default algorithm if none is specified\" do\n expect(experiment.algorithm).to eq(Split.configuration.algorithm)\n end\n\n it \"should use the user specified algorithm for this experiment if specified\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash)\n end\n end\n\n describe \"#next_alternative\" do\n context \"with multiple alternatives\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\", \"green\") }\n\n context \"with winner\" do\n it \"should always return the winner\" do\n green = Split::Alternative.new(\"green\", \"link_color\")\n experiment.winner = 
\"green\"\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n green.increment_participation\n\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n\n context \"without winner\" do\n it \"should use the specified algorithm\" do\n experiment.algorithm = Split::Algorithms::Whiplash\n expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new(\"green\", \"link_color\"))\n expect(experiment.next_alternative.name).to eq(\"green\")\n end\n end\n end\n\n context \"with single alternative\" do\n let(:experiment) { Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\") }\n\n it \"should always return the only alternative\" do\n expect(experiment.next_alternative.name).to eq(\"blue\")\n expect(experiment.next_alternative.name).to eq(\"blue\")\n end\n end\n end\n\n describe \"#cohorting_disabled?\" do\n it \"returns false when nothing has been configured\" do\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns true when enable_cohorting is performed\" do\n experiment.enable_cohorting\n expect(experiment.cohorting_disabled?).to eq false\n end\n\n it \"returns false when nothing has been configured\" do\n experiment.disable_cohorting\n expect(experiment.cohorting_disabled?).to eq true\n end\n end\n\n describe \"changing an existing experiment\" do\n def same_but_different_alternative\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"yellow\", \"orange\")\n end\n\n it \"should reset an experiment if it is loaded with different alternatives\" do\n experiment.save\n blue.participant_count = 5\n same_experiment = same_but_different_alternative\n expect(same_experiment.alternatives.map(&:name)).to eq([\"blue\", \"yellow\", \"orange\"])\n expect(blue.participant_count).to eq(0)\n end\n\n it \"should only reset once\" do\n experiment.save\n expect(experiment.version).to eq(0)\n same_experiment = same_but_different_alternative\n expect(same_experiment.version).to 
eq(1)\n same_experiment_again = same_but_different_alternative\n expect(same_experiment_again.version).to eq(1)\n end\n\n context \"when metadata is changed\" do\n it \"should increase version\" do\n experiment.save\n experiment.metadata = { \"foo\" => \"bar\" }\n\n expect { experiment.save }.to change { experiment.version }.by(1)\n end\n\n it \"does not increase version\" do\n experiment.metadata = nil\n experiment.save\n expect { experiment.save }.to change { experiment.version }.by(0)\n end\n end\n\n context \"when experiment configuration is changed\" do\n let(:reset_manually) { false }\n\n before do\n experiment.save\n allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually)\n green.increment_participation\n green.increment_participation\n experiment.set_alternatives_and_options(alternatives: %w(blue red green zip),\n goals: %w(purchase))\n experiment.save\n end\n\n it \"resets all alternatives\" do\n expect(green.participant_count).to eq(0)\n expect(green.completed_count).to eq(0)\n end\n\n context \"when reset_manually is set\" do\n let(:reset_manually) { true }\n\n it \"does not reset alternatives\" do\n expect(green.participant_count).to eq(2)\n expect(green.completed_count).to eq(0)\n end\n end\n end\n end\n\n describe \"alternatives passed as non-strings\" do\n it \"should throw an exception if an alternative is passed that is not a string\" do\n expect { Split::ExperimentCatalog.find_or_create(\"link_color\", :blue, :red) }.to raise_error(ArgumentError)\n expect { Split::ExperimentCatalog.find_or_create(\"link_enabled\", true, false) }.to raise_error(ArgumentError)\n end\n end\n\n describe \"specifying weights\" do\n let(:experiment_with_weight) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", { \"blue\" => 1 }, { \"red\" => 2 })\n }\n\n it \"should work for a new experiment\" do\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n\n it \"should work for an existing experiment\" do\n 
experiment.save\n expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2])\n end\n end\n\n describe \"specifying goals\" do\n let(:experiment) {\n new_experiment([\"purchase\"])\n }\n\n context \"saving experiment\" do\n let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\") }\n\n before { experiment.save }\n\n it \"can find existing experiment\" do\n expect(Split::ExperimentCatalog.find(\"link_color\").name).to eq(\"link_color\")\n end\n\n it \"should reset an experiment if it is loaded with different goals\" do\n same_but_different_goals\n expect(Split::ExperimentCatalog.find(\"link_color\").goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n it \"should have goals\" do\n expect(experiment.goals).to eq([\"purchase\"])\n end\n\n context \"find or create experiment\" do\n it \"should have correct goals\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n experiment = Split::ExperimentCatalog.find_or_create(\"link_color3\", \"blue\", \"red\", \"green\")\n expect(experiment.goals).to eq([])\n end\n end\n end\n\n describe \"beta probability calculation\" do\n it \"should return a hash with the probability of each alternative being the best\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"mathematicians\", \"bernoulli\", \"poisson\", \"lagrange\")\n experiment.calc_winning_alternatives\n expect(experiment.alternative_probabilities).not_to be_nil\n end\n\n it \"should return between 46% and 54% probability for an experiment with 2 alternatives and no data\" do\n experiment = Split::ExperimentCatalog.find_or_create(\"scientists\", \"einstein\", \"bohr\")\n experiment.calc_winning_alternatives\n expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50)\n end\n\n it \"should 
calculate the probability of being the winning alternative separately for each goal\", skip: true do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n goal1 = experiment.goals[0]\n goal2 = experiment.goals[1]\n experiment.alternatives.each do |alternative|\n alternative.participant_count = 50\n alternative.set_completed_count(10, goal1)\n alternative.set_completed_count(15+rand(30), goal2)\n end\n experiment.calc_winning_alternatives\n alt = experiment.alternatives[0]\n p_goal1 = alt.p_winner(goal1)\n p_goal2 = alt.p_winner(goal2)\n expect(p_goal1).not_to be_within(0.04).of(p_goal2)\n end\n\n it \"should return nil and not re-calculate probabilities if they have already been calculated today\" do\n experiment = Split::ExperimentCatalog.find_or_create({ \"link_color3\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\", \"green\")\n expect(experiment.calc_winning_alternatives).not_to be nil\n expect(experiment.calc_winning_alternatives).to be nil\n end\n end\nend\n\n Akriti/Sumedha - Now random_alternative method in experiment class uses the specified algorithm and not the default one.\n\n @@ -225,7 +225,8 @@ describe Split::Experiment do\n end\n \n it \"should use the specified algorithm if a winner does not exist\" do\n- Split.configuration.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color'))\n+ experiment.algorithm = Split::Algorithms::Whiplash\n+ experiment.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color'))\n experiment.next_alternative.name.should eql('green')\n end\n end\n"},"addition_count":{"kind":"number","value":2,"string":"2"},"commit_subject":{"kind":"string","value":"Akriti/Sumedha - Now random_alternative method in experiment class uses the specified algorithm and not the default 
one."},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675175,"cells":{"id":{"kind":"string","value":"10070825"},"text":{"kind":"string","value":" helper_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\n\n# TODO change some of these tests to use Rack::Test\n\ndescribe Split::Helper do\n include Split::Helper\n\n let(:experiment) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\")\n }\n\n describe \"ab_test\" do\n it \"should not raise an error when passed strings for alternatives\" do\n expect { ab_test(\"xyz\", \"1\", \"2\", \"3\") }.not_to raise_error\n end\n\n it \"should not raise an error when passed an array for alternatives\" do\n expect { ab_test(\"xyz\", [\"1\", \"2\", \"3\"]) }.not_to raise_error\n end\n\n it \"should raise the appropriate error when passed integers for alternatives\" do\n expect { ab_test(\"xyz\", 1, 2, 3) }.to raise_error(ArgumentError)\n end\n\n it \"should raise the appropriate error when passed symbols for alternatives\" do\n expect { ab_test(\"xyz\", :a, :b, :c) }.to raise_error(ArgumentError)\n end\n\n it \"should not raise error when passed an array for goals\" do\n expect { ab_test({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should not raise error when passed just one goal\" do\n expect { ab_test({ \"link_color\" => \"purchase\" }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"raises an appropriate error when processing combined expirements\" do\n Split.configuration.experiments = {\n combined_exp_1: {\n alternatives: [ { name: \"control\", percent: 50 }, { name: \"test-alt\", percent: 50 } ],\n metric: :my_metric,\n combined_experiments: [:combined_exp_1_sub_1]\n }\n }\n 
Split::ExperimentCatalog.find_or_create(\"combined_exp_1\")\n expect { ab_test(\"combined_exp_1\") }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"should assign a random alternative to a new user when there are an equal number of alternatives assigned\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should increment the participation counter after assignment to a new user\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)\n end\n\n it \"should not increment the counter for an experiment that the user is not participating in\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n # User shouldn't participate in this second experiment\n ab_test(\"button_size\", \"small\", \"big\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an not started experiment\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n 
expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should return the given alternative for an existing user\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always return the winner if one is present\" do\n experiment.winner = \"orange\"\n\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"orange\")\n end\n\n it \"should allow the alternative to be forced by passing it in the params\" do\n # ?ab_test[link_color]=blue\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"red\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 5 }, \"red\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not allow an arbitrary alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"pink\" } }\n alternative = ab_test(\"link_color\", \"blue\")\n expect(alternative).to eq(\"blue\")\n end\n\n it \"should not store the split when a param forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"SPLIT_DISABLE query parameter should also force the alternative (uses control)\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", \"red\", \"blue\")\n expect(alternative).to eq(\"red\")\n alternative = 
ab_test(\"link_color\", { \"red\" => 5 }, \"blue\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not store the split when Split generically disabled\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n context \"when store_override is set\" do\n before { Split.configuration.store_override = true }\n\n it \"should store the forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).to receive(:[]=).with(\"link_color\", \"blue\")\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n context \"when on_trial_choose is set\" do\n before { Split.configuration.on_trial_choose = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n it \"should allow passing a block\" do\n alt = ab_test(\"link_color\", \"blue\", \"red\")\n ret = ab_test(\"link_color\", \"blue\", \"red\") { |alternative| \"shared/#{alternative}\" }\n expect(ret).to eq(\"shared/#{alt}\")\n end\n\n it \"should allow the share of visitors see an alternative to be specified\" do\n ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should allow alternative weighting interface as a single hash\" do\n ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.alternatives.map(&:name)).to eq([\"blue\", \"red\"])\n expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])\n end\n\n it \"should only let a user participate in one experiment at a time\" do\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n big = Split::Alternative.new(\"big\", 
\"button_size\")\n expect(big.participant_count).to eq(0)\n small = Split::Alternative.new(\"small\", \"button_size\")\n expect(small.participant_count).to eq(0)\n end\n\n it \"should let a user participate in many experiment with allow_multiple_experiments option\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n button_size = ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n expect(ab_user[\"button_size\"]).to eq(button_size)\n button_size_alt = Split::Alternative.new(button_size, \"button_size\")\n expect(button_size_alt.participant_count).to eq(1)\n end\n\n context \"with allow_multiple_experiments = 'control'\" do\n it \"should let a user participate in many experiment with one non-'control' alternative\" do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n groups = 100.times.map do |n|\n ab_test(\"test#{n}\".to_sym, { \"control\" => (100 - n) }, { \"test#{n}-alt\" => n })\n end\n\n experiments = ab_user.active_experiments\n expect(experiments.size).to be > 1\n\n count_control = experiments.values.count { |g| g == \"control\" }\n expect(count_control).to eq(experiments.size - 1)\n\n count_alts = groups.count { |g| g != \"control\" }\n expect(count_alts).to eq(1)\n end\n\n context \"when user already has experiment\" do\n let(:mock_user) { Split::User.new(self, { \"test_0\" => \"test-alt\" }) }\n\n before do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n\n Split::ExperimentCatalog.find_or_initialize(\"test_0\", \"control\", \"test-alt\").save\n Split::ExperimentCatalog.find_or_initialize(\"test_1\", \"control\", \"test-alt\").save\n end\n\n it \"should restore previously selected alternative\" do\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n 
expect(ab_test(:test_0, { \"control\" => 1 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"should select the correct alternatives after experiment resets\" do\n experiment = Split::ExperimentCatalog.find(:test_0)\n experiment.reset\n mock_user[experiment.key] = \"test-alt\"\n\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"lets override existing choice\" do\n pending \"this requires user store reset on first call not depending on whelther it is current trial\"\n @params = { \"ab_test\" => { \"test_1\" => \"test-alt\" } }\n\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"control\"\n expect(ab_test(:test_1, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n end\n end\n end\n\n it \"should not over-write a finished key when an experiment is on a later version\" do\n experiment.increment_version\n ab_user = { experiment.key => \"blue\", experiment.finished_key => true }\n finished_session = ab_user.dup\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user).to eq(finished_session)\n end\n end\n\n describe \"metadata\" do\n context \"is defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: { \"one\" => \"Meta1\", \"two\" => \"Meta2\" }\n }\n }\n end\n\n it \"should be passed to helper block\" do\n @params = { \"ab_test\" => { \"my_experiment\" => \"two\" } }\n expect(ab_test(\"my_experiment\")).to eq \"two\"\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq(\"Meta2\")\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\")).to eq \"one\"\n 
expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq(\"Meta1\")\n end\n end\n\n context \"is not defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: nil\n }\n }\n end\n\n it \"should be passed to helper block\" do\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq({})\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq({})\n end\n end\n end\n\n describe \"ab_finished\" do\n context \"for an experiment that the user participates in\" do\n before(:each) do\n @experiment_name = \"link_color\"\n @alternatives = [\"blue\", \"red\"]\n @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n end\n\n it \"should increment the counter for the completed alternative\" do\n ab_finished(@experiment_name)\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should set experiment's finished key if reset is false\" do\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should not increment the counter if reset is false and the experiment has been already finished\" do\n 2.times { ab_finished(@experiment_name, { reset: false }) }\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should not 
increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(a, \"button_size\").completed_count }\n end\n\n it \"should clear out the user's participation from their session\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should not clear out the users session if reset is false\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should reset the users session when experiment is not versioned\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should reset the users session when experiment is versioned\" do\n @experiment.increment_version\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n context \"when on_trial_complete is set\" do\n before { Split.configuration.on_trial_complete = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_finished(@experiment_name)\n end\n\n it \"should not call the method without alternative\" do\n ab_user[@experiment.key] = nil\n expect(self).not_to receive(:some_method)\n ab_finished(@experiment_name)\n end\n end\n end\n\n context \"for an experiment that the user is excluded from\" do\n before do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Alternative.new(alternative, 
\"link_color\").participant_count).to eq(1)\n alternative = ab_test(\"button_size\", \"small\", \"big\")\n expect(Split::Alternative.new(alternative, \"button_size\").participant_count).to eq(0)\n end\n\n it \"should not increment the completed counter\" do\n # So, user should be participating in the link_color experiment and\n # receive the control for button_size. As the user is not participating in\n # the button size experiment, finishing it should not increase the\n # completion count for that alternative.\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(\"small\", \"button_size\").completed_count }\n end\n end\n\n context \"for an experiment that the user does not participate in\" do\n before do\n Split::ExperimentCatalog.find_or_create(:not_started_experiment, \"control\", \"alt\")\n end\n it \"should not raise an exception\" do\n expect { ab_finished(:not_started_experiment) }.not_to raise_exception\n end\n\n it \"should not change the user state when reset is false\" do\n expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])\n end\n\n it \"should not change the user state when reset is true\" do\n expect(self).not_to receive(:reset!)\n ab_finished(:not_started_experiment)\n end\n\n it \"should not increment the completed counter\" do\n ab_finished(:not_started_experiment)\n expect(Split::Alternative.new(\"control\", :not_started_experiment).completed_count).to eq(0)\n expect(Split::Alternative.new(\"alt\", :not_started_experiment).completed_count).to eq(0)\n end\n end\n end\n\n context \"finished with config\" do\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n }\n }\n alternative = ab_test(:my_experiment)\n experiment = Split::ExperimentCatalog.find :my_experiment\n\n ab_finished :my_experiment\n expect(ab_user[experiment.key]).to eq(alternative)\n 
expect(ab_user[experiment.finished_key]).to eq(true)\n end\n end\n\n context \"finished with metric name\" do\n before { Split.configuration.experiments = {} }\n before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }\n\n def should_finish_experiment(experiment_name, should_finish = true)\n alts = Split.configuration.experiments[experiment_name][:alternatives]\n experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)\n alt_name = ab_user[experiment.key] = alts.first\n alt = double(\"alternative\")\n expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)\n expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)\n if should_finish\n expect(alt).to receive(:increment_completion).at_most(1).times\n else\n expect(alt).not_to receive(:increment_completion)\n end\n end\n\n it \"completes the test\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n metric: :my_metric\n }\n should_finish_experiment :my_experiment\n ab_finished :my_metric\n end\n\n it \"completes all relevant tests\" do\n Split.configuration.experiments = {\n exp_1: {\n alternatives: [ \"1-1\", \"1-2\" ],\n metric: :my_metric\n },\n exp_2: {\n alternatives: [ \"2-1\", \"2-2\" ],\n metric: :another_metric\n },\n exp_3: {\n alternatives: [ \"3-1\", \"3-2\" ],\n metric: :my_metric\n },\n }\n should_finish_experiment :exp_1\n should_finish_experiment :exp_2, false\n should_finish_experiment :exp_3\n ab_finished :my_metric\n end\n\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n resettable: false,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n\n it \"passes 
through options\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric, reset: false\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n end\n\n describe \"conversions\" do\n it \"should return a conversion rate for an alternative\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(previous_convertion_rate).to eq(0.0)\n\n ab_finished(\"link_color\")\n\n new_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(new_convertion_rate).to eq(1.0)\n end\n end\n\n describe \"active experiments\" do\n it \"should show an active test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show a finished test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n ab_finished(\"def\", { reset: false })\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show an active test when an experiment is on a later version\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"link_color\"\n end\n\n it \"should show versioned tests properly\" do\n 10.times { experiment.reset }\n\n alternative = ab_test(experiment.name, \"blue\", \"red\")\n ab_finished(experiment.name, reset: false)\n\n expect(experiment.version).to eq(10)\n 
expect(active_experiments.count).to eq 1\n expect(active_experiments).to eq({ \"link_color\" => alternative })\n end\n\n it \"should show multiple tests\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 2\n expect(active_experiments[\"def\"]).to eq alternative\n expect(active_experiments[\"ghi\"]).to eq another_alternative\n end\n\n it \"should not show tests with winners\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n e = Split::ExperimentCatalog.find_or_create(\"def\", \"4\", \"5\", \"6\")\n e.winner = \"4\"\n ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"ghi\"\n expect(active_experiments.first[1]).to eq another_alternative\n end\n end\n\n describe \"when user is a robot\" do\n before(:each) do\n @request = OpenStruct.new(user_agent: \"Googlebot/2.1 (+http://www.google.com/bot.html)\")\n end\n\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not create a experiment\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Experiment.new(\"link_color\")).to be_a_new_record\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n 
expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when providing custom ignore logic\" do\n context \"using a proc to configure custom logic\" do\n before(:each) do\n Split.configure do |c|\n c.ignore_filter = proc { |request| true } # ignore everything\n end\n end\n\n it \"ignores the ab_test\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n\n red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n expect((red_count + blue_count)).to be(0)\n end\n end\n end\n\n shared_examples_for \"a disabled test\" do\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = 
ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when ip address is ignored\" do\n context \"individually\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.130\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"for a range\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.129\")\n Split.configure do |c|\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"using both a range and a specific value\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.128\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"when ignored other address\" do\n before do\n @request = OpenStruct.new(ip: \"1.1.1.1\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it \"works as usual\" do\n alternative_name = ab_test(\"link_color\", \"red\", \"blue\")\n expect {\n ab_finished(\"link_color\")\n }.to change(Split::Alternative.new(alternative_name, \"link_color\"), :completed_count).by(1)\n end\n end\n end\n\n describe \"when user is previewing\" do\n before(:each) do\n @request = OpenStruct.new(headers: { \"x-purpose\" => \"preview\" })\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n describe \"versioned experiments\" do\n it \"should use version zero if no version is present\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(experiment.version).to 
eq(0)\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n end\n\n it \"should save the version of the experiment to the session\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n end\n\n it \"should load the experiment even if the version is not 0\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n return_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(return_alternative_name).to eq(alternative_name)\n end\n\n it \"should reset the session of a user on an older version of the experiment\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n new_alternative = Split::Alternative.new(new_alternative_name, \"link_color\")\n expect(new_alternative.participant_count).to eq(1)\n end\n\n it \"should cleanup old versions of experiments from the session\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to 
eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n end\n\n it \"should only count completion of users on the current version\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n Split::Alternative.new(alternative_name, \"link_color\")\n\n experiment.reset\n expect(experiment.version).to eq(1)\n\n ab_finished(\"link_color\")\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.completed_count).to eq(0)\n end\n end\n\n context \"when redis is not available\" do\n before(:each) do\n expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)\n end\n\n context \"and db_failover config option is turned off\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = false\n end\n end\n\n describe \"ab_test\" do\n it \"should raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"finished\" do\n it \"should raise an exception\" do\n expect { ab_finished(\"link_color\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"disable split testing\" do\n before(:each) do\n Split.configure do |config|\n config.enabled = false\n end\n end\n\n it \"should not attempt to connect to redis\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should return control variable\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n end\n\n it \"should increment the counter for the specified-goal completed alternative\" do\n @previous_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1)\n @previous_completion_count_for_goal2 = 
Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n finished({\"link_color\" => \"purchase\"})\n new_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1)\n new_completion_count_for_goal1.should eql(@previous_completion_count_for_goal1 + 1)\n new_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n new_completion_count_for_goal2.should eql(@previous_completion_count_for_goal2)\n end\n end\n end\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always use first alternative\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"blue\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/blue\")\n end\n\n context \"and db_failover_allow_parameter_override config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover_allow_parameter_override = true\n end\n end\n\n context \"and given an override parameter\" do\n it \"should use given override instead of the first alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\", \"green\")).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"red\")\n expect(ab_test(\"link_color\", { 
\"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/red\")\n end\n end\n end\n\n context \"and preloaded config given\" do\n before do\n Split.configuration.experiments[:link_color] = {\n alternatives: [ \"blue\", \"red\" ],\n }\n end\n\n it \"uses first alternative\" do\n expect(ab_test(:link_color)).to eq(\"blue\")\n end\n end\n end\n\n describe \"finished\" do\n it \"should not raise an exception\" do\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_finished(\"link_color\")\n end\n end\n end\n end\n\n context \"with preloaded config\" do\n before { Split.configuration.experiments = {} }\n\n it \"pulls options from config file\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n ab_test :my_experiment\n expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(Split::Experiment.new(:my_experiment).goals).to eq([ \"goal1\", \"goal2\" ])\n end\n\n it \"can be called multiple times\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n 5.times { ab_test :my_experiment }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\" ])\n expect(experiment.participant_count).to eq(1)\n end\n\n it \"accepts multiple goals\" do\n 
Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [ \"goal1\", \"goal2\", \"goal3\" ]\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\", \"goal3\" ])\n end\n\n it \"allow specifying goals to be optional\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ]\n }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([])\n end\n\n it \"accepts multiple alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"second_opt\", \"third_opt\" ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"second_opt\", \"third_opt\" ])\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([[\"control_opt\", 0.67], [\"second_opt\", 0.1], [\"third_opt\", 0.23]])\n end\n\n it \"accepts probability on some alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.34], [\"second_opt\", 0.215], [\"third_opt\", 0.23], [\"fourth_opt\", 0.215]])\n expect(names_and_weights.inject(0) { |sum, 
nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"allows name param without probability\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\" },\n \"second_opt\",\n { name: \"third_opt\", percent: 64 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.18], [\"second_opt\", 0.18], [\"third_opt\", 0.64]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"fails gracefully if config is missing experiment\" do\n Split.configuration.experiments = { other_experiment: { foo: \"Bar\" } }\n expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)\n end\n\n it \"fails gracefully if config is missing\" do\n expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"fails gracefully if config is missing alternatives\" do\n Split.configuration.experiments[:my_experiment] = { foo: \"Bar\" }\n expect { ab_test :my_experiment }.to raise_error(NoMethodError)\n end\n end\n\n it \"should handle multiple experiments correctly\" do\n experiment2 = Split::ExperimentCatalog.find_or_create(\"link_color2\", \"blue\", \"red\")\n ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"link_color2\", \"blue\", \"red\")\n ab_finished(\"link_color2\")\n\n experiment2.alternatives.each do |alt|\n expect(alt.unfinished_count).to eq(0)\n end\n end\n\n context \"with goals\" do\n before do\n @experiment = { \"link_color\" => [\"purchase\", \"refund\"] }\n @alternatives = [\"blue\", \"red\"]\n @experiment_name, @goals = normalize_metric(@experiment)\n @goal1 = @goals[0]\n @goal2 = @goals[1]\n end\n\n it \"should normalize experiment\" do\n expect(@experiment_name).to eq(\"link_color\")\n expect(@goals).to eq([\"purchase\", \"refund\"])\n end\n\n describe \"ab_test\" do\n 
it \"should allow experiment goals interface as a single hash\" do\n ab_test(@experiment, *@alternatives)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n describe \"ab_finished\" do\n before do\n @alternative_name = ab_test(@experiment, *@alternatives)\n end\n\n it \"should increment the counter for the specified-goal completed alternative\" do\n expect { ab_finished({ \"link_color\" => [\"purchase\"] }) }\n .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)\n .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)\n end\n end\n end\nend\n\n Simplify goals helper spec\n\n @@ -900,13 +900,15 @@ describe Split::Helper do\n end\n \n it \"should increment the counter for the specified-goal completed alternative\" do\n- @previous_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1)\n- @previous_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n- finished({\"link_color\" => \"purchase\"})\n- new_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1)\n- new_completion_count_for_goal1.should eql(@previous_completion_count_for_goal1 + 1)\n- new_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n- new_completion_count_for_goal2.should eql(@previous_completion_count_for_goal2)\n+ lambda {\n+ lambda {\n+ finished({\"link_color\" => [\"purchase\"]})\n+ }.should_not change {\n+ Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n+ }\n+ }.should change {\n+ Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1)\n+ }.by(1)\n end\n end\n 
end\n"},"addition_count":{"kind":"number","value":9,"string":"9"},"commit_subject":{"kind":"string","value":"Simplify goals helper spec"},"deletion_count":{"kind":"number","value":7,"string":"7"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675176,"cells":{"id":{"kind":"string","value":"10070826"},"text":{"kind":"string","value":" helper_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\n\n# TODO change some of these tests to use Rack::Test\n\ndescribe Split::Helper do\n include Split::Helper\n\n let(:experiment) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\")\n }\n\n describe \"ab_test\" do\n it \"should not raise an error when passed strings for alternatives\" do\n expect { ab_test(\"xyz\", \"1\", \"2\", \"3\") }.not_to raise_error\n end\n\n it \"should not raise an error when passed an array for alternatives\" do\n expect { ab_test(\"xyz\", [\"1\", \"2\", \"3\"]) }.not_to raise_error\n end\n\n it \"should raise the appropriate error when passed integers for alternatives\" do\n expect { ab_test(\"xyz\", 1, 2, 3) }.to raise_error(ArgumentError)\n end\n\n it \"should raise the appropriate error when passed symbols for alternatives\" do\n expect { ab_test(\"xyz\", :a, :b, :c) }.to raise_error(ArgumentError)\n end\n\n it \"should not raise error when passed an array for goals\" do\n expect { ab_test({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should not raise error when passed just one goal\" do\n expect { ab_test({ \"link_color\" => \"purchase\" }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"raises an appropriate error when processing combined expirements\" do\n Split.configuration.experiments = {\n combined_exp_1: {\n alternatives: [ { name: \"control\", percent: 50 }, { name: \"test-alt\", 
percent: 50 } ],\n metric: :my_metric,\n combined_experiments: [:combined_exp_1_sub_1]\n }\n }\n Split::ExperimentCatalog.find_or_create(\"combined_exp_1\")\n expect { ab_test(\"combined_exp_1\") }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"should assign a random alternative to a new user when there are an equal number of alternatives assigned\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should increment the participation counter after assignment to a new user\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)\n end\n\n it \"should not increment the counter for an experiment that the user is not participating in\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n # User shouldn't participate in this second experiment\n ab_test(\"button_size\", \"small\", \"big\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an not started experiment\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n e = 
Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should return the given alternative for an existing user\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always return the winner if one is present\" do\n experiment.winner = \"orange\"\n\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"orange\")\n end\n\n it \"should allow the alternative to be forced by passing it in the params\" do\n # ?ab_test[link_color]=blue\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"red\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 5 }, \"red\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not allow an arbitrary alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"pink\" } }\n alternative = ab_test(\"link_color\", \"blue\")\n expect(alternative).to eq(\"blue\")\n end\n\n it \"should not store the split when a param forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"SPLIT_DISABLE query parameter should also force the alternative (uses control)\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n 
expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", \"red\", \"blue\")\n expect(alternative).to eq(\"red\")\n alternative = ab_test(\"link_color\", { \"red\" => 5 }, \"blue\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not store the split when Split generically disabled\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n context \"when store_override is set\" do\n before { Split.configuration.store_override = true }\n\n it \"should store the forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).to receive(:[]=).with(\"link_color\", \"blue\")\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n context \"when on_trial_choose is set\" do\n before { Split.configuration.on_trial_choose = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n it \"should allow passing a block\" do\n alt = ab_test(\"link_color\", \"blue\", \"red\")\n ret = ab_test(\"link_color\", \"blue\", \"red\") { |alternative| \"shared/#{alternative}\" }\n expect(ret).to eq(\"shared/#{alt}\")\n end\n\n it \"should allow the share of visitors see an alternative to be specified\" do\n ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should allow alternative weighting interface as a single hash\" do\n ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.alternatives.map(&:name)).to eq([\"blue\", \"red\"])\n expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])\n end\n\n it \"should only let a user participate in one experiment at a time\" do\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n 
ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n big = Split::Alternative.new(\"big\", \"button_size\")\n expect(big.participant_count).to eq(0)\n small = Split::Alternative.new(\"small\", \"button_size\")\n expect(small.participant_count).to eq(0)\n end\n\n it \"should let a user participate in many experiment with allow_multiple_experiments option\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n button_size = ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n expect(ab_user[\"button_size\"]).to eq(button_size)\n button_size_alt = Split::Alternative.new(button_size, \"button_size\")\n expect(button_size_alt.participant_count).to eq(1)\n end\n\n context \"with allow_multiple_experiments = 'control'\" do\n it \"should let a user participate in many experiment with one non-'control' alternative\" do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n groups = 100.times.map do |n|\n ab_test(\"test#{n}\".to_sym, { \"control\" => (100 - n) }, { \"test#{n}-alt\" => n })\n end\n\n experiments = ab_user.active_experiments\n expect(experiments.size).to be > 1\n\n count_control = experiments.values.count { |g| g == \"control\" }\n expect(count_control).to eq(experiments.size - 1)\n\n count_alts = groups.count { |g| g != \"control\" }\n expect(count_alts).to eq(1)\n end\n\n context \"when user already has experiment\" do\n let(:mock_user) { Split::User.new(self, { \"test_0\" => \"test-alt\" }) }\n\n before do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n\n Split::ExperimentCatalog.find_or_initialize(\"test_0\", \"control\", \"test-alt\").save\n Split::ExperimentCatalog.find_or_initialize(\"test_1\", \"control\", \"test-alt\").save\n end\n\n it \"should restore previously selected alternative\" do\n 
expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 1 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"should select the correct alternatives after experiment resets\" do\n experiment = Split::ExperimentCatalog.find(:test_0)\n experiment.reset\n mock_user[experiment.key] = \"test-alt\"\n\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"lets override existing choice\" do\n pending \"this requires user store reset on first call not depending on whelther it is current trial\"\n @params = { \"ab_test\" => { \"test_1\" => \"test-alt\" } }\n\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"control\"\n expect(ab_test(:test_1, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n end\n end\n end\n\n it \"should not over-write a finished key when an experiment is on a later version\" do\n experiment.increment_version\n ab_user = { experiment.key => \"blue\", experiment.finished_key => true }\n finished_session = ab_user.dup\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user).to eq(finished_session)\n end\n end\n\n describe \"metadata\" do\n context \"is defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: { \"one\" => \"Meta1\", \"two\" => \"Meta2\" }\n }\n }\n end\n\n it \"should be passed to helper block\" do\n @params = { \"ab_test\" => { \"my_experiment\" => \"two\" } }\n expect(ab_test(\"my_experiment\")).to eq \"two\"\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq(\"Meta2\")\n end\n\n it \"should pass control metadata helper block if library 
disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\")).to eq \"one\"\n expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq(\"Meta1\")\n end\n end\n\n context \"is not defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: nil\n }\n }\n end\n\n it \"should be passed to helper block\" do\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq({})\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq({})\n end\n end\n end\n\n describe \"ab_finished\" do\n context \"for an experiment that the user participates in\" do\n before(:each) do\n @experiment_name = \"link_color\"\n @alternatives = [\"blue\", \"red\"]\n @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n end\n\n it \"should increment the counter for the completed alternative\" do\n ab_finished(@experiment_name)\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should set experiment's finished key if reset is false\" do\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should not increment the counter if reset is false and the experiment has been already finished\" do\n 2.times { ab_finished(@experiment_name, { reset: false }) }\n new_completion_count = Split::Alternative.new(@alternative_name, 
@experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(a, \"button_size\").completed_count }\n end\n\n it \"should clear out the user's participation from their session\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should not clear out the users session if reset is false\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should reset the users session when experiment is not versioned\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should reset the users session when experiment is versioned\" do\n @experiment.increment_version\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n context \"when on_trial_complete is set\" do\n before { Split.configuration.on_trial_complete = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_finished(@experiment_name)\n end\n\n it \"should not call the method without alternative\" do\n ab_user[@experiment.key] = nil\n expect(self).not_to receive(:some_method)\n ab_finished(@experiment_name)\n end\n end\n end\n\n context \"for an experiment that the user is excluded 
from\" do\n before do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Alternative.new(alternative, \"link_color\").participant_count).to eq(1)\n alternative = ab_test(\"button_size\", \"small\", \"big\")\n expect(Split::Alternative.new(alternative, \"button_size\").participant_count).to eq(0)\n end\n\n it \"should not increment the completed counter\" do\n # So, user should be participating in the link_color experiment and\n # receive the control for button_size. As the user is not participating in\n # the button size experiment, finishing it should not increase the\n # completion count for that alternative.\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(\"small\", \"button_size\").completed_count }\n end\n end\n\n context \"for an experiment that the user does not participate in\" do\n before do\n Split::ExperimentCatalog.find_or_create(:not_started_experiment, \"control\", \"alt\")\n end\n it \"should not raise an exception\" do\n expect { ab_finished(:not_started_experiment) }.not_to raise_exception\n end\n\n it \"should not change the user state when reset is false\" do\n expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])\n end\n\n it \"should not change the user state when reset is true\" do\n expect(self).not_to receive(:reset!)\n ab_finished(:not_started_experiment)\n end\n\n it \"should not increment the completed counter\" do\n ab_finished(:not_started_experiment)\n expect(Split::Alternative.new(\"control\", :not_started_experiment).completed_count).to eq(0)\n expect(Split::Alternative.new(\"alt\", :not_started_experiment).completed_count).to eq(0)\n end\n end\n end\n\n context \"finished with config\" do\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n }\n }\n alternative = ab_test(:my_experiment)\n experiment = Split::ExperimentCatalog.find 
:my_experiment\n\n ab_finished :my_experiment\n expect(ab_user[experiment.key]).to eq(alternative)\n expect(ab_user[experiment.finished_key]).to eq(true)\n end\n end\n\n context \"finished with metric name\" do\n before { Split.configuration.experiments = {} }\n before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }\n\n def should_finish_experiment(experiment_name, should_finish = true)\n alts = Split.configuration.experiments[experiment_name][:alternatives]\n experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)\n alt_name = ab_user[experiment.key] = alts.first\n alt = double(\"alternative\")\n expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)\n expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)\n if should_finish\n expect(alt).to receive(:increment_completion).at_most(1).times\n else\n expect(alt).not_to receive(:increment_completion)\n end\n end\n\n it \"completes the test\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n metric: :my_metric\n }\n should_finish_experiment :my_experiment\n ab_finished :my_metric\n end\n\n it \"completes all relevant tests\" do\n Split.configuration.experiments = {\n exp_1: {\n alternatives: [ \"1-1\", \"1-2\" ],\n metric: :my_metric\n },\n exp_2: {\n alternatives: [ \"2-1\", \"2-2\" ],\n metric: :another_metric\n },\n exp_3: {\n alternatives: [ \"3-1\", \"3-2\" ],\n metric: :my_metric\n },\n }\n should_finish_experiment :exp_1\n should_finish_experiment :exp_2, false\n should_finish_experiment :exp_3\n ab_finished :my_metric\n end\n\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n resettable: false,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric\n 
expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n\n it \"passes through options\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric, reset: false\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n end\n\n describe \"conversions\" do\n it \"should return a conversion rate for an alternative\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(previous_convertion_rate).to eq(0.0)\n\n ab_finished(\"link_color\")\n\n new_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(new_convertion_rate).to eq(1.0)\n end\n end\n\n describe \"active experiments\" do\n it \"should show an active test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show a finished test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n ab_finished(\"def\", { reset: false })\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show an active test when an experiment is on a later version\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"link_color\"\n end\n\n it \"should show versioned tests properly\" do\n 10.times { experiment.reset }\n\n alternative = ab_test(experiment.name, \"blue\", 
\"red\")\n ab_finished(experiment.name, reset: false)\n\n expect(experiment.version).to eq(10)\n expect(active_experiments.count).to eq 1\n expect(active_experiments).to eq({ \"link_color\" => alternative })\n end\n\n it \"should show multiple tests\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 2\n expect(active_experiments[\"def\"]).to eq alternative\n expect(active_experiments[\"ghi\"]).to eq another_alternative\n end\n\n it \"should not show tests with winners\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n e = Split::ExperimentCatalog.find_or_create(\"def\", \"4\", \"5\", \"6\")\n e.winner = \"4\"\n ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"ghi\"\n expect(active_experiments.first[1]).to eq another_alternative\n end\n end\n\n describe \"when user is a robot\" do\n before(:each) do\n @request = OpenStruct.new(user_agent: \"Googlebot/2.1 (+http://www.google.com/bot.html)\")\n end\n\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not create a experiment\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Experiment.new(\"link_color\")).to be_a_new_record\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n 
new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when providing custom ignore logic\" do\n context \"using a proc to configure custom logic\" do\n before(:each) do\n Split.configure do |c|\n c.ignore_filter = proc { |request| true } # ignore everything\n end\n end\n\n it \"ignores the ab_test\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n\n red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n expect((red_count + blue_count)).to be(0)\n end\n end\n end\n\n shared_examples_for \"a disabled test\" do\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe 
\"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when ip address is ignored\" do\n context \"individually\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.130\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"for a range\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.129\")\n Split.configure do |c|\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"using both a range and a specific value\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.128\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"when ignored other address\" do\n before do\n @request = OpenStruct.new(ip: \"1.1.1.1\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it \"works as usual\" do\n alternative_name = ab_test(\"link_color\", \"red\", \"blue\")\n expect {\n ab_finished(\"link_color\")\n }.to change(Split::Alternative.new(alternative_name, \"link_color\"), :completed_count).by(1)\n end\n end\n end\n\n describe \"when user is previewing\" do\n before(:each) do\n @request = OpenStruct.new(headers: { \"x-purpose\" => \"preview\" })\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n describe \"versioned experiments\" do\n it \"should use version zero if no version is present\" do\n 
alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(experiment.version).to eq(0)\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n end\n\n it \"should save the version of the experiment to the session\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n end\n\n it \"should load the experiment even if the version is not 0\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n return_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(return_alternative_name).to eq(alternative_name)\n end\n\n it \"should reset the session of a user on an older version of the experiment\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n new_alternative = Split::Alternative.new(new_alternative_name, \"link_color\")\n expect(new_alternative.participant_count).to eq(1)\n end\n\n it \"should cleanup old versions of experiments from the session\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = 
Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n end\n\n it \"should only count completion of users on the current version\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n Split::Alternative.new(alternative_name, \"link_color\")\n\n experiment.reset\n expect(experiment.version).to eq(1)\n\n ab_finished(\"link_color\")\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.completed_count).to eq(0)\n end\n end\n\n context \"when redis is not available\" do\n before(:each) do\n expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)\n end\n\n context \"and db_failover config option is turned off\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = false\n end\n end\n\n describe \"ab_test\" do\n it \"should raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"finished\" do\n it \"should raise an exception\" do\n expect { ab_finished(\"link_color\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"disable split testing\" do\n before(:each) do\n Split.configure do |config|\n config.enabled = false\n end\n end\n\n it \"should not attempt to connect to redis\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should return control variable\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n end\n\n it \"should increment the counter for the specified-goal completed alternative\" do\n @previous_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, 
@experiment_name).completed_count(@goal1)\n @previous_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n finished({\"link_color\" => \"purchase\"})\n new_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1)\n new_completion_count_for_goal1.should eql(@previous_completion_count_for_goal1 + 1)\n new_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n new_completion_count_for_goal2.should eql(@previous_completion_count_for_goal2)\n end\n end\n end\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always use first alternative\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"blue\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/blue\")\n end\n\n context \"and db_failover_allow_parameter_override config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover_allow_parameter_override = true\n end\n end\n\n context \"and given an override parameter\" do\n it \"should use given override instead of the first alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\", \"green\")).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" 
=> 0.01 }, \"red\" => 0.2)).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/red\")\n end\n end\n end\n\n context \"and preloaded config given\" do\n before do\n Split.configuration.experiments[:link_color] = {\n alternatives: [ \"blue\", \"red\" ],\n }\n end\n\n it \"uses first alternative\" do\n expect(ab_test(:link_color)).to eq(\"blue\")\n end\n end\n end\n\n describe \"finished\" do\n it \"should not raise an exception\" do\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_finished(\"link_color\")\n end\n end\n end\n end\n\n context \"with preloaded config\" do\n before { Split.configuration.experiments = {} }\n\n it \"pulls options from config file\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n ab_test :my_experiment\n expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(Split::Experiment.new(:my_experiment).goals).to eq([ \"goal1\", \"goal2\" ])\n end\n\n it \"can be called multiple times\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n 5.times { ab_test :my_experiment }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\" ])\n 
expect(experiment.participant_count).to eq(1)\n end\n\n it \"accepts multiple goals\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [ \"goal1\", \"goal2\", \"goal3\" ]\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\", \"goal3\" ])\n end\n\n it \"allow specifying goals to be optional\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ]\n }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([])\n end\n\n it \"accepts multiple alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"second_opt\", \"third_opt\" ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"second_opt\", \"third_opt\" ])\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([[\"control_opt\", 0.67], [\"second_opt\", 0.1], [\"third_opt\", 0.23]])\n end\n\n it \"accepts probability on some alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.34], [\"second_opt\", 0.215], 
[\"third_opt\", 0.23], [\"fourth_opt\", 0.215]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"allows name param without probability\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\" },\n \"second_opt\",\n { name: \"third_opt\", percent: 64 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.18], [\"second_opt\", 0.18], [\"third_opt\", 0.64]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"fails gracefully if config is missing experiment\" do\n Split.configuration.experiments = { other_experiment: { foo: \"Bar\" } }\n expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)\n end\n\n it \"fails gracefully if config is missing\" do\n expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"fails gracefully if config is missing alternatives\" do\n Split.configuration.experiments[:my_experiment] = { foo: \"Bar\" }\n expect { ab_test :my_experiment }.to raise_error(NoMethodError)\n end\n end\n\n it \"should handle multiple experiments correctly\" do\n experiment2 = Split::ExperimentCatalog.find_or_create(\"link_color2\", \"blue\", \"red\")\n ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"link_color2\", \"blue\", \"red\")\n ab_finished(\"link_color2\")\n\n experiment2.alternatives.each do |alt|\n expect(alt.unfinished_count).to eq(0)\n end\n end\n\n context \"with goals\" do\n before do\n @experiment = { \"link_color\" => [\"purchase\", \"refund\"] }\n @alternatives = [\"blue\", \"red\"]\n @experiment_name, @goals = normalize_metric(@experiment)\n @goal1 = @goals[0]\n @goal2 = @goals[1]\n end\n\n it \"should normalize experiment\" do\n expect(@experiment_name).to 
eq(\"link_color\")\n expect(@goals).to eq([\"purchase\", \"refund\"])\n end\n\n describe \"ab_test\" do\n it \"should allow experiment goals interface as a single hash\" do\n ab_test(@experiment, *@alternatives)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n describe \"ab_finished\" do\n before do\n @alternative_name = ab_test(@experiment, *@alternatives)\n end\n\n it \"should increment the counter for the specified-goal completed alternative\" do\n expect { ab_finished({ \"link_color\" => [\"purchase\"] }) }\n .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)\n .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)\n end\n end\n end\nend\n\n Simplify goals helper spec\n\n @@ -900,13 +900,15 @@ describe Split::Helper do\n end\n \n it \"should increment the counter for the specified-goal completed alternative\" do\n- @previous_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1)\n- @previous_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n- finished({\"link_color\" => \"purchase\"})\n- new_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1)\n- new_completion_count_for_goal1.should eql(@previous_completion_count_for_goal1 + 1)\n- new_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n- new_completion_count_for_goal2.should eql(@previous_completion_count_for_goal2)\n+ lambda {\n+ lambda {\n+ finished({\"link_color\" => [\"purchase\"]})\n+ }.should_not change {\n+ Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n+ }\n+ }.should change {\n+ Split::Alternative.new(@alternative_name, 
@experiment_name).completed_count(@goal1)\n+ }.by(1)\n end\n end\n end\n"},"addition_count":{"kind":"number","value":9,"string":"9"},"commit_subject":{"kind":"string","value":"Simplify goals helper spec"},"deletion_count":{"kind":"number","value":7,"string":"7"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675177,"cells":{"id":{"kind":"string","value":"10070827"},"text":{"kind":"string","value":" helper_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\n\n# TODO change some of these tests to use Rack::Test\n\ndescribe Split::Helper do\n include Split::Helper\n\n let(:experiment) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\")\n }\n\n describe \"ab_test\" do\n it \"should not raise an error when passed strings for alternatives\" do\n expect { ab_test(\"xyz\", \"1\", \"2\", \"3\") }.not_to raise_error\n end\n\n it \"should not raise an error when passed an array for alternatives\" do\n expect { ab_test(\"xyz\", [\"1\", \"2\", \"3\"]) }.not_to raise_error\n end\n\n it \"should raise the appropriate error when passed integers for alternatives\" do\n expect { ab_test(\"xyz\", 1, 2, 3) }.to raise_error(ArgumentError)\n end\n\n it \"should raise the appropriate error when passed symbols for alternatives\" do\n expect { ab_test(\"xyz\", :a, :b, :c) }.to raise_error(ArgumentError)\n end\n\n it \"should not raise error when passed an array for goals\" do\n expect { ab_test({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should not raise error when passed just one goal\" do\n expect { ab_test({ \"link_color\" => \"purchase\" }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"raises an appropriate error when processing combined expirements\" do\n Split.configuration.experiments = {\n combined_exp_1: {\n alternatives: 
[ { name: \"control\", percent: 50 }, { name: \"test-alt\", percent: 50 } ],\n metric: :my_metric,\n combined_experiments: [:combined_exp_1_sub_1]\n }\n }\n Split::ExperimentCatalog.find_or_create(\"combined_exp_1\")\n expect { ab_test(\"combined_exp_1\") }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"should assign a random alternative to a new user when there are an equal number of alternatives assigned\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should increment the participation counter after assignment to a new user\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)\n end\n\n it \"should not increment the counter for an experiment that the user is not participating in\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n # User shouldn't participate in this second experiment\n ab_test(\"button_size\", \"small\", \"big\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an not started experiment\" do\n expect(Split.configuration).to 
receive(:start_manually).and_return(true)\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should return the given alternative for an existing user\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always return the winner if one is present\" do\n experiment.winner = \"orange\"\n\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"orange\")\n end\n\n it \"should allow the alternative to be forced by passing it in the params\" do\n # ?ab_test[link_color]=blue\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"red\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 5 }, \"red\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not allow an arbitrary alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"pink\" } }\n alternative = ab_test(\"link_color\", \"blue\")\n expect(alternative).to eq(\"blue\")\n end\n\n it \"should not store the split when a param forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"SPLIT_DISABLE query parameter should also force the alternative (uses control)\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", 
{ \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", \"red\", \"blue\")\n expect(alternative).to eq(\"red\")\n alternative = ab_test(\"link_color\", { \"red\" => 5 }, \"blue\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not store the split when Split generically disabled\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n context \"when store_override is set\" do\n before { Split.configuration.store_override = true }\n\n it \"should store the forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).to receive(:[]=).with(\"link_color\", \"blue\")\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n context \"when on_trial_choose is set\" do\n before { Split.configuration.on_trial_choose = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n it \"should allow passing a block\" do\n alt = ab_test(\"link_color\", \"blue\", \"red\")\n ret = ab_test(\"link_color\", \"blue\", \"red\") { |alternative| \"shared/#{alternative}\" }\n expect(ret).to eq(\"shared/#{alt}\")\n end\n\n it \"should allow the share of visitors see an alternative to be specified\" do\n ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should allow alternative weighting interface as a single hash\" do\n ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.alternatives.map(&:name)).to eq([\"blue\", \"red\"])\n expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])\n end\n\n it \"should only let a user participate in one experiment at a time\" do\n link_color = 
ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n big = Split::Alternative.new(\"big\", \"button_size\")\n expect(big.participant_count).to eq(0)\n small = Split::Alternative.new(\"small\", \"button_size\")\n expect(small.participant_count).to eq(0)\n end\n\n it \"should let a user participate in many experiment with allow_multiple_experiments option\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n button_size = ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n expect(ab_user[\"button_size\"]).to eq(button_size)\n button_size_alt = Split::Alternative.new(button_size, \"button_size\")\n expect(button_size_alt.participant_count).to eq(1)\n end\n\n context \"with allow_multiple_experiments = 'control'\" do\n it \"should let a user participate in many experiment with one non-'control' alternative\" do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n groups = 100.times.map do |n|\n ab_test(\"test#{n}\".to_sym, { \"control\" => (100 - n) }, { \"test#{n}-alt\" => n })\n end\n\n experiments = ab_user.active_experiments\n expect(experiments.size).to be > 1\n\n count_control = experiments.values.count { |g| g == \"control\" }\n expect(count_control).to eq(experiments.size - 1)\n\n count_alts = groups.count { |g| g != \"control\" }\n expect(count_alts).to eq(1)\n end\n\n context \"when user already has experiment\" do\n let(:mock_user) { Split::User.new(self, { \"test_0\" => \"test-alt\" }) }\n\n before do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n\n Split::ExperimentCatalog.find_or_initialize(\"test_0\", \"control\", \"test-alt\").save\n Split::ExperimentCatalog.find_or_initialize(\"test_1\", \"control\", \"test-alt\").save\n end\n\n it \"should restore 
previously selected alternative\" do\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 1 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"should select the correct alternatives after experiment resets\" do\n experiment = Split::ExperimentCatalog.find(:test_0)\n experiment.reset\n mock_user[experiment.key] = \"test-alt\"\n\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"lets override existing choice\" do\n pending \"this requires user store reset on first call not depending on whelther it is current trial\"\n @params = { \"ab_test\" => { \"test_1\" => \"test-alt\" } }\n\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"control\"\n expect(ab_test(:test_1, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n end\n end\n end\n\n it \"should not over-write a finished key when an experiment is on a later version\" do\n experiment.increment_version\n ab_user = { experiment.key => \"blue\", experiment.finished_key => true }\n finished_session = ab_user.dup\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user).to eq(finished_session)\n end\n end\n\n describe \"metadata\" do\n context \"is defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: { \"one\" => \"Meta1\", \"two\" => \"Meta2\" }\n }\n }\n end\n\n it \"should be passed to helper block\" do\n @params = { \"ab_test\" => { \"my_experiment\" => \"two\" } }\n expect(ab_test(\"my_experiment\")).to eq \"two\"\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq(\"Meta2\")\n end\n\n it \"should pass 
control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\")).to eq \"one\"\n expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq(\"Meta1\")\n end\n end\n\n context \"is not defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: nil\n }\n }\n end\n\n it \"should be passed to helper block\" do\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq({})\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq({})\n end\n end\n end\n\n describe \"ab_finished\" do\n context \"for an experiment that the user participates in\" do\n before(:each) do\n @experiment_name = \"link_color\"\n @alternatives = [\"blue\", \"red\"]\n @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n end\n\n it \"should increment the counter for the completed alternative\" do\n ab_finished(@experiment_name)\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should set experiment's finished key if reset is false\" do\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should not increment the counter if reset is false and the experiment has been already finished\" do\n 2.times { ab_finished(@experiment_name, { reset: false }) }\n new_completion_count = 
Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(a, \"button_size\").completed_count }\n end\n\n it \"should clear out the user's participation from their session\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should not clear out the users session if reset is false\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should reset the users session when experiment is not versioned\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should reset the users session when experiment is versioned\" do\n @experiment.increment_version\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n context \"when on_trial_complete is set\" do\n before { Split.configuration.on_trial_complete = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_finished(@experiment_name)\n end\n\n it \"should not call the method without alternative\" do\n ab_user[@experiment.key] = nil\n expect(self).not_to receive(:some_method)\n ab_finished(@experiment_name)\n end\n end\n end\n\n context \"for 
an experiment that the user is excluded from\" do\n before do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Alternative.new(alternative, \"link_color\").participant_count).to eq(1)\n alternative = ab_test(\"button_size\", \"small\", \"big\")\n expect(Split::Alternative.new(alternative, \"button_size\").participant_count).to eq(0)\n end\n\n it \"should not increment the completed counter\" do\n # So, user should be participating in the link_color experiment and\n # receive the control for button_size. As the user is not participating in\n # the button size experiment, finishing it should not increase the\n # completion count for that alternative.\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(\"small\", \"button_size\").completed_count }\n end\n end\n\n context \"for an experiment that the user does not participate in\" do\n before do\n Split::ExperimentCatalog.find_or_create(:not_started_experiment, \"control\", \"alt\")\n end\n it \"should not raise an exception\" do\n expect { ab_finished(:not_started_experiment) }.not_to raise_exception\n end\n\n it \"should not change the user state when reset is false\" do\n expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])\n end\n\n it \"should not change the user state when reset is true\" do\n expect(self).not_to receive(:reset!)\n ab_finished(:not_started_experiment)\n end\n\n it \"should not increment the completed counter\" do\n ab_finished(:not_started_experiment)\n expect(Split::Alternative.new(\"control\", :not_started_experiment).completed_count).to eq(0)\n expect(Split::Alternative.new(\"alt\", :not_started_experiment).completed_count).to eq(0)\n end\n end\n end\n\n context \"finished with config\" do\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n }\n }\n alternative = ab_test(:my_experiment)\n 
experiment = Split::ExperimentCatalog.find :my_experiment\n\n ab_finished :my_experiment\n expect(ab_user[experiment.key]).to eq(alternative)\n expect(ab_user[experiment.finished_key]).to eq(true)\n end\n end\n\n context \"finished with metric name\" do\n before { Split.configuration.experiments = {} }\n before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }\n\n def should_finish_experiment(experiment_name, should_finish = true)\n alts = Split.configuration.experiments[experiment_name][:alternatives]\n experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)\n alt_name = ab_user[experiment.key] = alts.first\n alt = double(\"alternative\")\n expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)\n expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)\n if should_finish\n expect(alt).to receive(:increment_completion).at_most(1).times\n else\n expect(alt).not_to receive(:increment_completion)\n end\n end\n\n it \"completes the test\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n metric: :my_metric\n }\n should_finish_experiment :my_experiment\n ab_finished :my_metric\n end\n\n it \"completes all relevant tests\" do\n Split.configuration.experiments = {\n exp_1: {\n alternatives: [ \"1-1\", \"1-2\" ],\n metric: :my_metric\n },\n exp_2: {\n alternatives: [ \"2-1\", \"2-2\" ],\n metric: :another_metric\n },\n exp_3: {\n alternatives: [ \"3-1\", \"3-2\" ],\n metric: :my_metric\n },\n }\n should_finish_experiment :exp_1\n should_finish_experiment :exp_2, false\n should_finish_experiment :exp_3\n ab_finished :my_metric\n end\n\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n resettable: false,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n 
ab_finished :my_metric\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n\n it \"passes through options\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric, reset: false\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n end\n\n describe \"conversions\" do\n it \"should return a conversion rate for an alternative\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(previous_convertion_rate).to eq(0.0)\n\n ab_finished(\"link_color\")\n\n new_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(new_convertion_rate).to eq(1.0)\n end\n end\n\n describe \"active experiments\" do\n it \"should show an active test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show a finished test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n ab_finished(\"def\", { reset: false })\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show an active test when an experiment is on a later version\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"link_color\"\n end\n\n it \"should show versioned tests properly\" do\n 10.times { experiment.reset }\n\n alternative = 
ab_test(experiment.name, \"blue\", \"red\")\n ab_finished(experiment.name, reset: false)\n\n expect(experiment.version).to eq(10)\n expect(active_experiments.count).to eq 1\n expect(active_experiments).to eq({ \"link_color\" => alternative })\n end\n\n it \"should show multiple tests\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 2\n expect(active_experiments[\"def\"]).to eq alternative\n expect(active_experiments[\"ghi\"]).to eq another_alternative\n end\n\n it \"should not show tests with winners\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n e = Split::ExperimentCatalog.find_or_create(\"def\", \"4\", \"5\", \"6\")\n e.winner = \"4\"\n ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"ghi\"\n expect(active_experiments.first[1]).to eq another_alternative\n end\n end\n\n describe \"when user is a robot\" do\n before(:each) do\n @request = OpenStruct.new(user_agent: \"Googlebot/2.1 (+http://www.google.com/bot.html)\")\n end\n\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not create a experiment\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Experiment.new(\"link_color\")).to be_a_new_record\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", 
\"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when providing custom ignore logic\" do\n context \"using a proc to configure custom logic\" do\n before(:each) do\n Split.configure do |c|\n c.ignore_filter = proc { |request| true } # ignore everything\n end\n end\n\n it \"ignores the ab_test\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n\n red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n expect((red_count + blue_count)).to be(0)\n end\n end\n end\n\n shared_examples_for \"a disabled test\" do\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + 
previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when ip address is ignored\" do\n context \"individually\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.130\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"for a range\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.129\")\n Split.configure do |c|\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"using both a range and a specific value\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.128\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"when ignored other address\" do\n before do\n @request = OpenStruct.new(ip: \"1.1.1.1\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it \"works as usual\" do\n alternative_name = ab_test(\"link_color\", \"red\", \"blue\")\n expect {\n ab_finished(\"link_color\")\n }.to change(Split::Alternative.new(alternative_name, \"link_color\"), :completed_count).by(1)\n end\n end\n end\n\n describe \"when user is previewing\" do\n before(:each) do\n @request = OpenStruct.new(headers: { \"x-purpose\" => \"preview\" })\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n describe \"versioned experiments\" do\n it \"should use version 
zero if no version is present\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(experiment.version).to eq(0)\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n end\n\n it \"should save the version of the experiment to the session\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n end\n\n it \"should load the experiment even if the version is not 0\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n return_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(return_alternative_name).to eq(alternative_name)\n end\n\n it \"should reset the session of a user on an older version of the experiment\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n new_alternative = Split::Alternative.new(new_alternative_name, \"link_color\")\n expect(new_alternative.participant_count).to eq(1)\n end\n\n it \"should cleanup old versions of experiments from the session\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n 
expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n end\n\n it \"should only count completion of users on the current version\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n Split::Alternative.new(alternative_name, \"link_color\")\n\n experiment.reset\n expect(experiment.version).to eq(1)\n\n ab_finished(\"link_color\")\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.completed_count).to eq(0)\n end\n end\n\n context \"when redis is not available\" do\n before(:each) do\n expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)\n end\n\n context \"and db_failover config option is turned off\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = false\n end\n end\n\n describe \"ab_test\" do\n it \"should raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"finished\" do\n it \"should raise an exception\" do\n expect { ab_finished(\"link_color\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"disable split testing\" do\n before(:each) do\n Split.configure do |config|\n config.enabled = false\n end\n end\n\n it \"should not attempt to connect to redis\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should return control variable\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n end\n\n it \"should increment the counter for the specified-goal completed alternative\" do\n @previous_completion_count_for_goal1 = 
Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1)\n @previous_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n finished({\"link_color\" => \"purchase\"})\n new_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1)\n new_completion_count_for_goal1.should eql(@previous_completion_count_for_goal1 + 1)\n new_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n new_completion_count_for_goal2.should eql(@previous_completion_count_for_goal2)\n end\n end\n end\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always use first alternative\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"blue\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/blue\")\n end\n\n context \"and db_failover_allow_parameter_override config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover_allow_parameter_override = true\n end\n end\n\n context \"and given an override parameter\" do\n it \"should use given override instead of the first alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\", \"green\")).to eq(\"red\")\n 
expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/red\")\n end\n end\n end\n\n context \"and preloaded config given\" do\n before do\n Split.configuration.experiments[:link_color] = {\n alternatives: [ \"blue\", \"red\" ],\n }\n end\n\n it \"uses first alternative\" do\n expect(ab_test(:link_color)).to eq(\"blue\")\n end\n end\n end\n\n describe \"finished\" do\n it \"should not raise an exception\" do\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_finished(\"link_color\")\n end\n end\n end\n end\n\n context \"with preloaded config\" do\n before { Split.configuration.experiments = {} }\n\n it \"pulls options from config file\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n ab_test :my_experiment\n expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(Split::Experiment.new(:my_experiment).goals).to eq([ \"goal1\", \"goal2\" ])\n end\n\n it \"can be called multiple times\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n 5.times { ab_test :my_experiment }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(experiment.goals).to eq([ \"goal1\", 
\"goal2\" ])\n expect(experiment.participant_count).to eq(1)\n end\n\n it \"accepts multiple goals\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [ \"goal1\", \"goal2\", \"goal3\" ]\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\", \"goal3\" ])\n end\n\n it \"allow specifying goals to be optional\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ]\n }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([])\n end\n\n it \"accepts multiple alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"second_opt\", \"third_opt\" ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"second_opt\", \"third_opt\" ])\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([[\"control_opt\", 0.67], [\"second_opt\", 0.1], [\"third_opt\", 0.23]])\n end\n\n it \"accepts probability on some alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.34], 
[\"second_opt\", 0.215], [\"third_opt\", 0.23], [\"fourth_opt\", 0.215]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"allows name param without probability\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\" },\n \"second_opt\",\n { name: \"third_opt\", percent: 64 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.18], [\"second_opt\", 0.18], [\"third_opt\", 0.64]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"fails gracefully if config is missing experiment\" do\n Split.configuration.experiments = { other_experiment: { foo: \"Bar\" } }\n expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)\n end\n\n it \"fails gracefully if config is missing\" do\n expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"fails gracefully if config is missing alternatives\" do\n Split.configuration.experiments[:my_experiment] = { foo: \"Bar\" }\n expect { ab_test :my_experiment }.to raise_error(NoMethodError)\n end\n end\n\n it \"should handle multiple experiments correctly\" do\n experiment2 = Split::ExperimentCatalog.find_or_create(\"link_color2\", \"blue\", \"red\")\n ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"link_color2\", \"blue\", \"red\")\n ab_finished(\"link_color2\")\n\n experiment2.alternatives.each do |alt|\n expect(alt.unfinished_count).to eq(0)\n end\n end\n\n context \"with goals\" do\n before do\n @experiment = { \"link_color\" => [\"purchase\", \"refund\"] }\n @alternatives = [\"blue\", \"red\"]\n @experiment_name, @goals = normalize_metric(@experiment)\n @goal1 = @goals[0]\n @goal2 = @goals[1]\n end\n\n it \"should normalize experiment\" do\n 
expect(@experiment_name).to eq(\"link_color\")\n expect(@goals).to eq([\"purchase\", \"refund\"])\n end\n\n describe \"ab_test\" do\n it \"should allow experiment goals interface as a single hash\" do\n ab_test(@experiment, *@alternatives)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n describe \"ab_finished\" do\n before do\n @alternative_name = ab_test(@experiment, *@alternatives)\n end\n\n it \"should increment the counter for the specified-goal completed alternative\" do\n expect { ab_finished({ \"link_color\" => [\"purchase\"] }) }\n .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)\n .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)\n end\n end\n end\nend\n\n Simplify goals helper spec\n\n @@ -900,13 +900,15 @@ describe Split::Helper do\n end\n \n it \"should increment the counter for the specified-goal completed alternative\" do\n- @previous_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1)\n- @previous_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n- finished({\"link_color\" => \"purchase\"})\n- new_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1)\n- new_completion_count_for_goal1.should eql(@previous_completion_count_for_goal1 + 1)\n- new_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n- new_completion_count_for_goal2.should eql(@previous_completion_count_for_goal2)\n+ lambda {\n+ lambda {\n+ finished({\"link_color\" => [\"purchase\"]})\n+ }.should_not change {\n+ Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2)\n+ }\n+ }.should change {\n+ 
Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1)\n+ }.by(1)\n end\n end\n end\n"},"addition_count":{"kind":"number","value":9,"string":"9"},"commit_subject":{"kind":"string","value":"Simplify goals helper spec"},"deletion_count":{"kind":"number","value":7,"string":"7"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675178,"cells":{"id":{"kind":"string","value":"10070828"},"text":{"kind":"string","value":" CHANGELOG.md\n ## 3.4.1 (November 12th, 2019)\n\nBugfixes:\n- Bump minimum required redis to 4.2 (@andrehjr, #628)\n- Removed repeated loading from config (@robin-phung, #619)\n- Simplify RedisInterface usage when persisting Experiment alternatives (@andrehjr, #632)\n- Remove redis_url impl. Deprecated on version 2.2 (@andrehjr, #631)\n- Remove thread_safe config as redis-rb is thread_safe by default (@andrehjr, #630)\n- Fix typo of in `Split::Trial` class variable (TomasBarry, #644)\n- Single HSET to update values, instead of multiple ones (@andrehjr, #640)\n- Use Redis#hmset to keep compatibility with Redis < 4.0 (@andrehjr, #659)\n- Remove 'set' parsing for alternatives. Sets were used as storage and deprecated on 0.x (@andrehjr, #639)\n- Adding documentation related to what is stored on cookies. (@andrehjr, #634)\n- Keep railtie defined under the Split gem namespace (@avit, #666)\n- Update RSpec helper to support block syntax (@clowder, #665)\n\n## 3.4.1 (November 12th, 2019)\n\nBugfixes:\n- Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602)\n\n## 3.4.0 (November 9th, 2019)\n\nFeatures:\n- Improve DualAdapter (@santib, #588), adds a new configuration for the DualAdapter, making it possible to keep consistency for logged_out/logged_in users. It's a opt-in flag. 
No Behavior was changed on this release.\n- Make dashboard pagination default \"per\" param configurable (@alopatin, #597)\n\nBugfixes:\n- Fix `force_alternative` for experiments with incremented version (@giraffate, #568)\n- Persist alternative weights (@giraffate, #570)\n- Combined experiment performance improvements (@gnanou, #575)\n- Handle correctly case when ab_finished is called before ab_test for a user (@gnanou, #577)\n- When loading active_experiments, it should not look into user's 'finished' keys (@andrehjr, #582)\n\nMisc:\n- Remove `rubyforge_project` from gemspec (@giraffate, #583)\n- Fix URLs to replace http with https (@giraffate , #584)\n- Lazily include split helpers in ActionController::Base (@hasghari, #586)\n- Fix unused variable warnings (@andrehjr, #592)\n- Fix ruby warnings (@andrehjr, #593)\n- Update rubocop.yml config (@andrehjr, #594)\n- Add frozen_string_literal to all files that were missing it (@andrehjr, #595)\n\n## 3.3.2 (April 12th, 2019)\n\nFeatures:\n- Added uptime robot to configuration.rb (@razel1982, #556)\n- Check to see if being run in Rails application and run in before_initialize (@husteadrobert, #555)\n\nBugfixes:\n- Fix error message interpolation (@hanibash, #553)\n- Fix Bigdecimal warnings (@agraves, #551)\n- Avoid hitting up on redis for robots/excluded users. (@andrehjr, #544)\n- Checks for defined?(request) on Helper#exclude_visitor?. 
(@andrehjr)\n\nMisc:\n- Update travis to add Rails 6 (@edmilton, #559)\n- Fix broken specs in developement environment (@dougpetronilio, #557)\n\n## 3.3.1 (January 11th, 2019)\n\nFeatures:\n- Filter some more bots (@janosch-x, #542)\n\nBugfixes:\n- Fix Dashboard Pagination Helper typo (@cattekin, #541)\n- Do not storage alternative in cookie if experiment has a winner (@sadhu89, #539)\n- fix user participating alternative not found (@NaturalHokke, #536)\n\nMisc:\n- Tweak RSpec instructions (@eliotsykes, #540)\n- Improve README regarding rspec usage (@vermaxik, #538)\n\n## 3.3.0 (August 13th, 2018)\n\nFeatures:\n\n- Added pagination for dashboard (@GeorgeGorbanev, #518)\n- Add Facebot crawler to list of bots (@pfeiffer, #530)\n- Ignore previewing requests (@pfeiffer, #531)\n- Fix binding of ignore_filter (@pfeiffer, #533)\n\nBugfixes:\n\n- Fix cookie header duplication (@andrehjr, #522)\n\nPerformance:\n\n- Improve performance of RedisInterface#make_list_length by using LTRIM command (@mlovic, #509)\n\nMisc:\n\n- Update development dependencies\n- test rails 5.2 on travis (@lostapathy, #524)\n- update ruby versions for travis (@lostapathy, #525)\n\n## 3.2.0 (September 21st, 2017)\n\nFeatures:\n\n- Allow configuration of how often winning alternatives are recalculated (@patbl, #501)\n\nBugfixes:\n\n- Avoid z_score numeric exception for conversion rates >1 (@cmantas, #503)\n- Fix combined experiments (@semanticart, #502)\n\n## 3.1.1 (August 30th, 2017)\n\nBugfixes:\n\n- Bring back support for ruby 1.9.3 and greater (rubygems 2.0.0 or greater now required) (@patbl, #498)\n\nMisc:\n\n- Document testing with RSpec (@eliotsykes, #495)\n\n## 3.1.0 (August 14th, 2017)\n\nFeatures:\n\n- Support for combined experiments (@daviddening, #493)\n- Rewrite CookieAdapter to work with Rack::Request and Rack::Response directly (@andrehjr, #490)\n- Enumeration of a User's Experiments that Respects the db_failover Option(@MarkRoddy, #487)\n\nBugfixes:\n\n- Blocked a few more common bot 
user agents (@kylerippey, #485)\n\nMisc:\n\n- Repository Audit by Maintainer.io (@RichardLitt, #484)\n- Update development dependencies\n- Test on ruby 2.4.1\n- Test compatibility with rails 5.1\n- Add uris to metadata section in gemspec\n\n## 3.0.0 (March 30th, 2017)\n\nFeatures:\n\n- added block randomization algorithm and specs (@hulleywood, #475)\n- Add ab_record_extra_info to allow record extra info to alternative and display on dashboard. (@tranngocsam, #460)\n\nBugfixes:\n\n- Avoid crashing on Ruby 2.4 for numeric strings (@flori, #470)\n- Fix issue where redis isn't required (@tomciopp , #466)\n\nMisc:\n\n- Avoid variable_size_secure_compare private method (@eliotsykes, #465)\n\n## 2.2.0 (November 11th, 2016)\n\n**Backwards incompatible!** Redis keys are renamed. Please make sure all running tests are completed before you upgrade, as they will reset.\n\nFeatures:\n\n- Remove dependency on Redis::Namespace (@bschaeffer, #425)\n- Make resetting on experiment change optional (@moggyboy, #430)\n- Add ability to force alternative on dashboard (@ccallebs, #437)\n\nBugfixes:\n\n- Fix variations reset across page loads for multiple=control and improve coverage (@Vasfed, #432)\n\nMisc:\n\n- Remove Explicit Return (@BradHudson, #441)\n- Update Redis config docs (@bschaeffer, #422)\n- Harden HTTP Basic snippet against timing attacks (@eliotsykes, #443)\n- Removed a couple old ruby 1.8 hacks (@andrew, #456)\n- Run tests on rails 5 (@andrew, #457)\n- Fixed a few codeclimate warnings (@andrew, #458)\n- Use codeclimate for test coverage (@andrew #455)\n\n## 2.1.0 (August 8th, 2016)\n\nFeatures:\n\n- Support REDIS_PROVIDER variable used in Heroku (@kartikluke, #426)\n\n## 2.0.0 (July 17th, 2016)\n\nBreaking changes:\n\n- Removed deprecated `finished` and `begin_experiment` methods\n- Namespaced override param to avoid potential clashes (@henrik, #398)\n\n## 1.7.0 (June 28th, 2016)\n\nFeatures:\n\n- Running concurrent experiments on same endpoint/view (@karmakaze, 
#421)\n\n## 1.6.0 (June 16th, 2016)\n\nFeatures:\n\n- Add Dual Redis(logged-in)/cookie(logged-out) persistence adapter (@karmakaze, #420)\n\n## 1.5.0 (June 8th, 2016)\n\nFeatures:\n\n- Add `expire_seconds:` TTL option to RedisAdapter (@karmakaze, #409)\n- Optional custom persistence adapter (@ndelage, #411)\n\nMisc:\n\n- Use fakeredis for testing (@andrew, #412)\n\n## 1.4.5 (June 7th, 2016)\n\nBugfixes:\n\n- FIX Negative numbers on non-finished (@divineforest, #408)\n- Eliminate extra RedisAdapter hget (@karmakaze, #407)\n- Remove unecessary code from Experiment class (@pakallis, #391, #392, #393)\n\nMisc:\n\n- Simplify Configuration#normalized_experiments (@pakallis, #395)\n- Clarify test running instructions (@henrik, #397)\n\n## 1.4.4 (May 9th, 2016)\n\nBugfixes:\n\n- Increment participation if store override is true and no experiment key exists (@spheric, #380)\n\nMisc:\n\n- Deprecated `finished` method in favour of `ab_finished` (@andreibondarev, #389)\n- Added minimum version requirement to simple-random\n- Clarify finished with first option being a hash in Readme (@henrik, #382)\n- Refactoring the User abstraction (@andreibondarev, #384)\n\n## 1.4.3 (April 28th, 2016)\n\nFeatures:\n\n- add on_trial callback whenever a trial is started (@mtyeh411, #375)\n\nBugfixes:\n\n- Allow algorithm configuration at experiment level (@007sumit, #376)\n\nMisc:\n\n- only choose override if it exists as valid alternative (@spheric, #377)\n\n## 1.4.2 (April 25th, 2016)\n\nMisc:\n\n- Deprecated some legacy methods (@andreibondarev, #374)\n\n## 1.4.1 (April 21st, 2016)\n\nBugfixes:\n\n- respect manual start configuration after an experiment has been deleted (@mtyeh411, #372)\n\nMisc:\n\n- Introduce goals collection to reduce complexity of Experiment#save (@pakallis, #365)\n- Revise specs according to http://betterspecs.org/ (@hkliya, #369)\n\n## 1.4.0 (April 2nd, 2016)\n\nFeatures:\n\n- Added experiment filters to dashboard (@ccallebs, #363, #364)\n- Added Contributor Covenant 
Code of Conduct\n\n## 1.3.2 (January 2nd, 2016)\n\nBugfixes:\n\n- Fix deleting experiments in from the updated dashboard (@craigmcnamara, #352)\n\n## 1.3.1 (January 1st, 2016)\n\nBugfixes:\n\n- Fix the dashboard for experiments with ‘/‘ in the name. (@craigmcnamara, #349)\n\n## 1.3.0 (October 20th, 2015)\n\nFeatures:\n\n - allow for custom redis_url different from ENV variable (@davidgrieser, #323)\n - add ability to change the length of the persistence cookie (@peterylai, #335)\n\nBugfixes:\n\n - Rescue from Redis::BaseError instead of Redis::CannotConnectError (@nfm, #342)\n - Fix active experiments when experiment is on a later version (@ndrisso, #331)\n - Fix caching of winning alternative (@nfm, #329)\n\nMisc:\n\n - Remove duplication from Experiment#save (@pakallis, #333)\n - Remove unnecessary argument from Experiment#write_to_alternative (@t4deu, #332)\n\n## 1.2.1 (May 17th, 2015)\n\nFeatures:\n\n - Handle redis DNS resolution failures gracefully (@fusion2004, #310)\n - Push metadata to ab_test block (@ekorneeff, #296)\n - Helper methods are now private when included in controllers (@ipoval, #303)\n\nBugfixes:\n\n - Return an empty hash as metadata when Split is disabled (@tomasdundacek, #313)\n - Don't use capture helper from ActionView (@tomasdundacek, #312)\n\nMisc:\n\n - Remove body \"max-width\" from dashboard (@xicreative, #299)\n - fix private for class methods (@ipoval, #301)\n - minor memoization fix in spec (@ipoval, #304)\n - Minor documentation fixes (#295, #297, #305, #308)\n\n## 1.2.0 (January 24th, 2015)\n\nFeatures:\n\n - Configure redis using environment variables if available (@saratovsource , #293)\n - Store metadata on experiment configuration (@dekz, #291)\n\nBugfixes:\n\n - Revert the Trial#complete! 
public API to support noargs (@dekz, #292)\n\n## 1.1.0 (January 9th, 2015)\n\nChanges:\n\n - Public class methods on `Split::Experiment` (e.g., `find_or_create`)\n have been moved to `Split::ExperimentCatalog`.\n\nFeatures:\n\n - Decouple trial from Split::Helper (@joshdover, #286)\n - Helper method for Active Experiments (@blahblahblah-, #273)\n\nMisc:\n\n - Use the new travis container based infrastructure for tests (@andrew, #280)\n\n## 1.0.0 (October 12th, 2014)\n\nChanges:\n\n - Remove support for Ruby 1.8.7 and Rails 2.3 (@qpowell, #271)\n\n## 0.8.0 (September 25th, 2014)\n\nFeatures:\n\n - Added new way to calculate the probability an alternative is the winner (@caser, #266, #251)\n - support multiple metrics per experiment (@stevenou, #260)\n\nBugfixes:\n\n - Avoiding call to params in EncapsulatedHelper (@afn, #257)\n\n## 0.7.3 (September 16th, 2014)\n\nFeatures:\n\n - Disable all split tests via a URL parameter (@hwartig, #263)\n\nBugfixes:\n\n - Correctly escape experiment names on dashboard (@ecaron, #265)\n - Handle redis connection exception error properly (@andrew, #245)\n\n## 0.7.2 (June 12th, 2014)\n\nFeatures:\n\n - Show metrics on the dashboard (@swrobel, #241)\n\nBugfixes:\n\n - Avoid nil error with ExperimentCatalog when upgrading (@danielschwartz, #253)\n - [SECURITY ISSUE] Only allow known alternatives as query param overrides (@ankane, #255)\n\n## 0.7.1 (March 20th, 2014)\n\nFeatures:\n\n - You can now reopen experiment from the dashboard (@mikezaby, #235)\n\nMisc:\n\n - Internal code tidy up (@IanVaughan, #238)\n\n## 0.7.0 (December 26th, 2013)\n\nFeatures:\n\n - Significantly improved z-score algorithm (@caser ,#221)\n - Better sorting of Experiments on dashboard (@wadako111, #218)\n\nBugfixes:\n\n - Fixed start button not being displayed in some cases (@vigosan, #219)\n\nMisc:\n\n - Experiment#initialize refactoring (@nberger, #224)\n - Extract ExperimentStore into a seperate class (@nberger, #225)\n\n## 0.6.6 (October 15th, 
2013)\n\nFeatures:\n\n - Sort experiments on Dashboard so \"active\" ones without a winner appear first (@swrobel, #204)\n - Starting tests manually (@duksis, #209)\n\nBugfixes:\n\n - Only trigger completion callback with valid Trial (@segfaultAX, #208)\n - Fixed bug with `resettable` when using `normalize_experiments` (@jonashuckestein, #213)\n\nMisc:\n\n - Added more bots to filter list (@lbeder, #214, #215, #216)\n\n## 0.6.5 (August 23, 2013)\n\nFeatures:\n\n - Added Redis adapter for persisting experiments across sessions (@fengb, #203)\n\nMisc:\n\n - Expand upon algorithms section in README (@swrobel, #200)\n\n## 0.6.4 (August 8, 2013)\n\nFeatures:\n\n - Add hooks for experiment deletion and resetting (@craigmcnamara, #198)\n - Allow Split::Helper to be used outside of a controller (@nfm, #190)\n - Show current Rails/Rack Env in dashboard (@rceee, #187)\n\nBugfixes:\n\n - Fix whiplash algorithm when using goals (@swrobel, #193)\n\nMisc:\n\n - Refactor dashboard js (@buddhamagnet)\n\n## 0.6.3 (July 8, 2013)\n\nFeatures:\n\n - Add hooks for Trial#choose! and Trial#complete! 
(@bmarini, #176)\n\nBugfixes:\n\n - Stores and parses Experiment's start_time as a UNIX integer (@joeroot, #177)\n\n## 0.6.2 (June 6, 2013)\n\nFeatures:\n\n - Rails 2.3 compatibility (@bhcarpenter, #167)\n - Adding possibility to store overridden alternative (@duksis, #173)\n\nMisc:\n\n - Now testing against multiple versions of rails\n\n## 0.6.1 (May 4, 2013)\n\nBugfixes:\n\n - Use the specified algorithm for the experiment instead of the default (@woodhull, #165)\n\nMisc:\n\n - Ensure experiements are valid when configuring (@ashmckenzie, #159)\n - Allow arrays to be passed to ab_test (@fenelon, #156)\n\n## 0.6.0 (April 4, 2013)\n\nFeatures:\n\n - Support for Ruby 2.0.0 (@phoet, #142)\n - Multiple Goals (@liujin, #109)\n - Ignoring IPs using Regular Expressions (@waynemoore, #119)\n - Added ability to add more bots to the default list (@themgt, #140)\n - Allow custom configuration of user blocking logic (@phoet , #148)\n\nBugfixes:\n\n - Fixed regression in handling of config files (@iangreenleaf, #115)\n - Fixed completion rate increases for experiments users aren't participating in (@philnash, #67)\n - Handle exceptions from invalid JSON in cookies (@iangreenleaf, #126)\n\nMisc:\n\n - updated minimum json version requirement\n - Refactor Yaml Configuration (@rtwomey, #124)\n - Refactoring of Experiments (@iangreenleaf @tamird, #117 #118)\n - Added more known Bots, including Pingdom, Bing, YandexBot (@julesie, @zinkkrysty, @dimko)\n - Improved Readme (@iangreenleaf @phoet)\n\n## 0.5.0 (January 28, 2013)\n\nFeatures:\n\n - Persistence Adapters: Cookies and Session (@patbenatar, #98)\n - Configure experiments from a hash (@iangreenleaf, #97)\n - Pluggable sampling algorithms (@woodhull, #105)\n\nBugfixes:\n\n - Fixed negative number of non-finished rates (@philnash, #83)\n - Fixed behaviour of finished(:reset => false) (@philnash, #88)\n - Only take into consideration positive z-scores (@thomasmaas, #96)\n - Amended ab_test method to raise ArgumentError if passed 
integers or symbols as\n alternatives (@buddhamagnet, #81)\n\n## 0.4.6 (October 28, 2012)\n\nFeatures:\n\n - General code quality improvements (@buddhamagnet, #79)\n\nBugfixes:\n\n - Don't increment the experiment counter if user has finished (@dimko, #78)\n - Fixed an incorrect test (@jaywengrow, #74)\n\n## 0.4.5 (August 30, 2012)\n\nBugfixes:\n\n - Fixed header gradient in FF/Opera (@philnash, #69)\n - Fixed reseting of experiment in session (@apsoto, #43)\n\n## 0.4.4 (August 9, 2012)\n\nFeatures:\n\n - Allow parameter overrides, even without Redis. (@bhcarpenter, #62)\n\nBugfixes:\n\n - Fixes version number always increasing when alternatives are changed (@philnash, #63)\n - updated guard-rspec to version 1.2\n\n## 0.4.3 (July 8, 2012)\n\nFeatures:\n\n - redis failover now recovers from all redis-related exceptions\n\n## 0.4.2 (June 1, 2012)\n\nFeatures:\n\n - Now works with v3.0 of redis gem\n\nBugfixes:\n\n - Fixed redis failover on Rubinius\n\n## 0.4.1 (April 6, 2012)\n\nFeatures:\n\n - Added configuration option to disable Split testing (@ilyakatz, #45)\n\nBugfixes:\n\n - Fix weights for existing experiments (@andreas, #40)\n - Fixed dashboard range error (@andrew, #42)\n\n## 0.4.0 (March 7, 2012)\n\n**IMPORTANT**\n\nIf using ruby 1.8.x and weighted alternatives you should always pass the control alternative through as the second argument with any other alternatives as a third argument because the order of the hash is not preserved in ruby 1.8, ruby 1.9 users are not affected by this bug.\n\nFeatures:\n\n - Experiments now record when they were started (@vrish88, #35)\n - Old versions of experiments in sessions are now cleaned up\n - Avoid users participating in multiple experiments at once (#21)\n\nBugfixes:\n\n - Overriding alternatives doesn't work for weighted alternatives (@layflags, #34)\n - confidence_level helper should handle tiny z-scores (#23)\n\n## 0.3.3 (February 16, 2012)\n\nBugfixes:\n\n - Fixed redis failover when a block was passed to 
ab_test (@layflags, #33)\n\n## 0.3.2 (February 12, 2012)\n\nFeatures:\n\n - Handle redis errors gracefully (@layflags, #32)\n\n## 0.3.1 (November 19, 2011)\n\nFeatures:\n\n - General code tidy up (@ryanlecompte, #22, @mocoso, #28)\n - Lazy loading data from Redis (@lautis, #25)\n\nBugfixes:\n\n - Handle unstarted experiments (@mocoso, #27)\n - Relaxed Sinatra version requirement (@martinclu, #24)\n\n\n## 0.3.0 (October 9, 2011)\n\nFeatures:\n\n - Redesigned dashboard (@mrappleton, #17)\n - Use atomic increments in redis for better concurrency (@lautis, #18)\n - Weighted alternatives\n\nBugfixes:\n\n - Fix to allow overriding of experiments that aren't on version 1\n\n\n## 0.2.4 (July 18, 2011)\n\nFeatures:\n\n - Added option to finished to not reset the users session\n\nBugfixes:\n\n - Only allow strings as alternatives, fixes strange errors when passing true/false or symbols\n\n## 0.2.3 (June 26, 2011)\n\nFeatures:\n\n - Experiments can now be deleted from the dashboard\n - ab_test helper now accepts a block\n - Improved dashboard\n\nBugfixes:\n\n - After resetting an experiment, existing users of that experiment will also be reset\n\n## 0.2.2 (June 11, 2011)\n\nFeatures:\n\n - Updated redis-namespace requirement to 1.0.3\n - Added a configuration object for changing options\n - Robot regex can now be changed via a configuration options\n - Added ability to ignore visits from specified IP addresses\n - Dashboard now shows percentage improvement of alternatives compared to the control\n - If the alternatives of an experiment are changed it resets the experiment and uses the new alternatives\n\nBugfixes:\n\n - Saving an experiment multiple times no longer creates duplicate alternatives\n\n## 0.2.1 (May 29, 2011)\n\nBugfixes:\n\n - Convert legacy sets to lists to avoid exceptions during upgrades from 0.1.x\n\n## 0.2.0 (May 29, 2011)\n\nFeatures:\n\n - Override an alternative via a url parameter\n - Experiments can now be reset from the dashboard\n - The first 
alternative is now considered the control\n - General dashboard usability improvements\n - Robots are ignored and given the control alternative\n\nBugfixes:\n\n - Alternatives are now store in a list rather than a set to ensure consistent ordering\n - Fixed diving by zero errors\n\n## 0.1.1 (May 18, 2011)\n\nBugfixes:\n\n - More Robust conversion rate display on dashboard\n - Ensure `Split::Version` is available everywhere, fixed dashboard\n\n## 0.1.0 (May 17, 2011)\n\nInitial Release\n\n Update CHANGELOG.md\n\n @@ -1,3 +1,23 @@\n+## Unreleased 4.0.0\n+\n+Bugfixes:\n+- ab_test must return metadata on error or if split is disabled/excluded user (@andrehjr, #622)\n+- Fix versioned experiments when used with allow_multiple_experiments=control (@andrehjr, #613)\n+- Only block Pinterest bot (@huoxito, #606)\n+- Respect experiment defaults when loading experiments in initializer. (@mattwd7, #599)\n+\n+Features:\n+- Make goals accessible via on_trial_complete callbacks (@robin-phung, #625)\n+- Replace usage of SimpleRandom with RubyStats(Used for Beta Distribution RNG) (@andrehjr, #616)\n+- Introduce enable/disable experiment cohorting (@robin-phung, #615)\n+- Add on_experiment_winner_choose callback (@GenaMinenkov, #574)\n+\n+Misc:\n+- Drop support for Ruby < 2.5 (@andrehjr, #627)\n+- Drop support for Rails < 5 (@andrehkr, #607)\n+- Bump minimum required redis to 4.2 (@andrehjr, #628)\n+- Removed repeated loading from config (@robin-phung, #619)\n+\n ## 3.4.1 (November 12th, 2019)\n \n Bugfixes:\n"},"addition_count":{"kind":"number","value":20,"string":"20"},"commit_subject":{"kind":"string","value":"Update CHANGELOG.md"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675179,"cells":{"id":{"kind":"string","value":"10070829"},"text":{"kind":"string","value":" 
CHANGELOG.md\n ## 3.4.1 (November 12th, 2019)\n\nBugfixes:\n- Bump minimum required redis to 4.2 (@andrehjr, #628)\n- Removed repeated loading from config (@robin-phung, #619)\n- Simplify RedisInterface usage when persisting Experiment alternatives (@andrehjr, #632)\n- Remove redis_url impl. Deprecated on version 2.2 (@andrehjr, #631)\n- Remove thread_safe config as redis-rb is thread_safe by default (@andrehjr, #630)\n- Fix typo in `Split::Trial` class variable (TomasBarry, #644)\n- Single HSET to update values, instead of multiple ones (@andrehjr, #640)\n- Use Redis#hmset to keep compatibility with Redis < 4.0 (@andrehjr, #659)\n- Remove 'set' parsing for alternatives. Sets were used as storage and deprecated on 0.x (@andrehjr, #639)\n- Adding documentation related to what is stored on cookies. (@andrehjr, #634)\n- Keep railtie defined under the Split gem namespace (@avit, #666)\n- Update RSpec helper to support block syntax (@clowder, #665)\n\n## 3.4.1 (November 12th, 2019)\n\nBugfixes:\n- Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602)\n\n## 3.4.0 (November 9th, 2019)\n\nFeatures:\n- Improve DualAdapter (@santib, #588), adds a new configuration for the DualAdapter, making it possible to keep consistency for logged_out/logged_in users. It's an opt-in flag. 
No behavior was changed on this release.\n- Make dashboard pagination default \"per\" param configurable (@alopatin, #597)\n\nBugfixes:\n- Fix `force_alternative` for experiments with incremented version (@giraffate, #568)\n- Persist alternative weights (@giraffate, #570)\n- Combined experiment performance improvements (@gnanou, #575)\n- Correctly handle the case when ab_finished is called before ab_test for a user (@gnanou, #577)\n- When loading active_experiments, it should not look into user's 'finished' keys (@andrehjr, #582)\n\nMisc:\n- Remove `rubyforge_project` from gemspec (@giraffate, #583)\n- Fix URLs to replace http with https (@giraffate, #584)\n- Lazily include split helpers in ActionController::Base (@hasghari, #586)\n- Fix unused variable warnings (@andrehjr, #592)\n- Fix ruby warnings (@andrehjr, #593)\n- Update rubocop.yml config (@andrehjr, #594)\n- Add frozen_string_literal to all files that were missing it (@andrehjr, #595)\n\n## 3.3.2 (April 12th, 2019)\n\nFeatures:\n- Added uptime robot to configuration.rb (@razel1982, #556)\n- Check to see if being run in Rails application and run in before_initialize (@husteadrobert, #555)\n\nBugfixes:\n- Fix error message interpolation (@hanibash, #553)\n- Fix Bigdecimal warnings (@agraves, #551)\n- Avoid hitting up on redis for robots/excluded users. (@andrehjr, #544)\n- Checks for defined?(request) on Helper#exclude_visitor?. 
(@andrehjr)\n\nMisc:\n- Update travis to add Rails 6 (@edmilton, #559)\n- Fix broken specs in development environment (@dougpetronilio, #557)\n\n## 3.3.1 (January 11th, 2019)\n\nFeatures:\n- Filter some more bots (@janosch-x, #542)\n\nBugfixes:\n- Fix Dashboard Pagination Helper typo (@cattekin, #541)\n- Do not store alternative in cookie if experiment has a winner (@sadhu89, #539)\n- fix user participating alternative not found (@NaturalHokke, #536)\n\nMisc:\n- Tweak RSpec instructions (@eliotsykes, #540)\n- Improve README regarding rspec usage (@vermaxik, #538)\n\n## 3.3.0 (August 13th, 2018)\n\nFeatures:\n\n- Added pagination for dashboard (@GeorgeGorbanev, #518)\n- Add Facebot crawler to list of bots (@pfeiffer, #530)\n- Ignore previewing requests (@pfeiffer, #531)\n- Fix binding of ignore_filter (@pfeiffer, #533)\n\nBugfixes:\n\n- Fix cookie header duplication (@andrehjr, #522)\n\nPerformance:\n\n- Improve performance of RedisInterface#make_list_length by using LTRIM command (@mlovic, #509)\n\nMisc:\n\n- Update development dependencies\n- test rails 5.2 on travis (@lostapathy, #524)\n- update ruby versions for travis (@lostapathy, #525)\n\n## 3.2.0 (September 21st, 2017)\n\nFeatures:\n\n- Allow configuration of how often winning alternatives are recalculated (@patbl, #501)\n\nBugfixes:\n\n- Avoid z_score numeric exception for conversion rates >1 (@cmantas, #503)\n- Fix combined experiments (@semanticart, #502)\n\n## 3.1.1 (August 30th, 2017)\n\nBugfixes:\n\n- Bring back support for ruby 1.9.3 and greater (rubygems 2.0.0 or greater now required) (@patbl, #498)\n\nMisc:\n\n- Document testing with RSpec (@eliotsykes, #495)\n\n## 3.1.0 (August 14th, 2017)\n\nFeatures:\n\n- Support for combined experiments (@daviddening, #493)\n- Rewrite CookieAdapter to work with Rack::Request and Rack::Response directly (@andrehjr, #490)\n- Enumeration of a User's Experiments that Respects the db_failover Option (@MarkRoddy, #487)\n\nBugfixes:\n\n- Blocked a few more common bot 
user agents (@kylerippey, #485)\n\nMisc:\n\n- Repository Audit by Maintainer.io (@RichardLitt, #484)\n- Update development dependencies\n- Test on ruby 2.4.1\n- Test compatibility with rails 5.1\n- Add uris to metadata section in gemspec\n\n## 3.0.0 (March 30th, 2017)\n\nFeatures:\n\n- added block randomization algorithm and specs (@hulleywood, #475)\n- Add ab_record_extra_info to allow record extra info to alternative and display on dashboard. (@tranngocsam, #460)\n\nBugfixes:\n\n- Avoid crashing on Ruby 2.4 for numeric strings (@flori, #470)\n- Fix issue where redis isn't required (@tomciopp , #466)\n\nMisc:\n\n- Avoid variable_size_secure_compare private method (@eliotsykes, #465)\n\n## 2.2.0 (November 11th, 2016)\n\n**Backwards incompatible!** Redis keys are renamed. Please make sure all running tests are completed before you upgrade, as they will reset.\n\nFeatures:\n\n- Remove dependency on Redis::Namespace (@bschaeffer, #425)\n- Make resetting on experiment change optional (@moggyboy, #430)\n- Add ability to force alternative on dashboard (@ccallebs, #437)\n\nBugfixes:\n\n- Fix variations reset across page loads for multiple=control and improve coverage (@Vasfed, #432)\n\nMisc:\n\n- Remove Explicit Return (@BradHudson, #441)\n- Update Redis config docs (@bschaeffer, #422)\n- Harden HTTP Basic snippet against timing attacks (@eliotsykes, #443)\n- Removed a couple old ruby 1.8 hacks (@andrew, #456)\n- Run tests on rails 5 (@andrew, #457)\n- Fixed a few codeclimate warnings (@andrew, #458)\n- Use codeclimate for test coverage (@andrew #455)\n\n## 2.1.0 (August 8th, 2016)\n\nFeatures:\n\n- Support REDIS_PROVIDER variable used in Heroku (@kartikluke, #426)\n\n## 2.0.0 (July 17th, 2016)\n\nBreaking changes:\n\n- Removed deprecated `finished` and `begin_experiment` methods\n- Namespaced override param to avoid potential clashes (@henrik, #398)\n\n## 1.7.0 (June 28th, 2016)\n\nFeatures:\n\n- Running concurrent experiments on same endpoint/view (@karmakaze, 
#421)\n\n## 1.6.0 (June 16th, 2016)\n\nFeatures:\n\n- Add Dual Redis(logged-in)/cookie(logged-out) persistence adapter (@karmakaze, #420)\n\n## 1.5.0 (June 8th, 2016)\n\nFeatures:\n\n- Add `expire_seconds:` TTL option to RedisAdapter (@karmakaze, #409)\n- Optional custom persistence adapter (@ndelage, #411)\n\nMisc:\n\n- Use fakeredis for testing (@andrew, #412)\n\n## 1.4.5 (June 7th, 2016)\n\nBugfixes:\n\n- FIX Negative numbers on non-finished (@divineforest, #408)\n- Eliminate extra RedisAdapter hget (@karmakaze, #407)\n- Remove unnecessary code from Experiment class (@pakallis, #391, #392, #393)\n\nMisc:\n\n- Simplify Configuration#normalized_experiments (@pakallis, #395)\n- Clarify test running instructions (@henrik, #397)\n\n## 1.4.4 (May 9th, 2016)\n\nBugfixes:\n\n- Increment participation if store override is true and no experiment key exists (@spheric, #380)\n\nMisc:\n\n- Deprecated `finished` method in favour of `ab_finished` (@andreibondarev, #389)\n- Added minimum version requirement to simple-random\n- Clarify finished with first option being a hash in Readme (@henrik, #382)\n- Refactoring the User abstraction (@andreibondarev, #384)\n\n## 1.4.3 (April 28th, 2016)\n\nFeatures:\n\n- add on_trial callback whenever a trial is started (@mtyeh411, #375)\n\nBugfixes:\n\n- Allow algorithm configuration at experiment level (@007sumit, #376)\n\nMisc:\n\n- only choose override if it exists as valid alternative (@spheric, #377)\n\n## 1.4.2 (April 25th, 2016)\n\nMisc:\n\n- Deprecated some legacy methods (@andreibondarev, #374)\n\n## 1.4.1 (April 21st, 2016)\n\nBugfixes:\n\n- respect manual start configuration after an experiment has been deleted (@mtyeh411, #372)\n\nMisc:\n\n- Introduce goals collection to reduce complexity of Experiment#save (@pakallis, #365)\n- Revise specs according to http://betterspecs.org/ (@hkliya, #369)\n\n## 1.4.0 (April 2nd, 2016)\n\nFeatures:\n\n- Added experiment filters to dashboard (@ccallebs, #363, #364)\n- Added Contributor Covenant 
Code of Conduct\n\n## 1.3.2 (January 2nd, 2016)\n\nBugfixes:\n\n- Fix deleting experiments from the updated dashboard (@craigmcnamara, #352)\n\n## 1.3.1 (January 1st, 2016)\n\nBugfixes:\n\n- Fix the dashboard for experiments with '/' in the name. (@craigmcnamara, #349)\n\n## 1.3.0 (October 20th, 2015)\n\nFeatures:\n\n - allow for custom redis_url different from ENV variable (@davidgrieser, #323)\n - add ability to change the length of the persistence cookie (@peterylai, #335)\n\nBugfixes:\n\n - Rescue from Redis::BaseError instead of Redis::CannotConnectError (@nfm, #342)\n - Fix active experiments when experiment is on a later version (@ndrisso, #331)\n - Fix caching of winning alternative (@nfm, #329)\n\nMisc:\n\n - Remove duplication from Experiment#save (@pakallis, #333)\n - Remove unnecessary argument from Experiment#write_to_alternative (@t4deu, #332)\n\n## 1.2.1 (May 17th, 2015)\n\nFeatures:\n\n - Handle redis DNS resolution failures gracefully (@fusion2004, #310)\n - Push metadata to ab_test block (@ekorneeff, #296)\n - Helper methods are now private when included in controllers (@ipoval, #303)\n\nBugfixes:\n\n - Return an empty hash as metadata when Split is disabled (@tomasdundacek, #313)\n - Don't use capture helper from ActionView (@tomasdundacek, #312)\n\nMisc:\n\n - Remove body \"max-width\" from dashboard (@xicreative, #299)\n - fix private for class methods (@ipoval, #301)\n - minor memoization fix in spec (@ipoval, #304)\n - Minor documentation fixes (#295, #297, #305, #308)\n\n## 1.2.0 (January 24th, 2015)\n\nFeatures:\n\n - Configure redis using environment variables if available (@saratovsource, #293)\n - Store metadata on experiment configuration (@dekz, #291)\n\nBugfixes:\n\n - Revert the Trial#complete! 
public API to support noargs (@dekz, #292)\n\n## 1.1.0 (January 9th, 2015)\n\nChanges:\n\n - Public class methods on `Split::Experiment` (e.g., `find_or_create`)\n have been moved to `Split::ExperimentCatalog`.\n\nFeatures:\n\n - Decouple trial from Split::Helper (@joshdover, #286)\n - Helper method for Active Experiments (@blahblahblah-, #273)\n\nMisc:\n\n - Use the new travis container based infrastructure for tests (@andrew, #280)\n\n## 1.0.0 (October 12th, 2014)\n\nChanges:\n\n - Remove support for Ruby 1.8.7 and Rails 2.3 (@qpowell, #271)\n\n## 0.8.0 (September 25th, 2014)\n\nFeatures:\n\n - Added new way to calculate the probability an alternative is the winner (@caser, #266, #251)\n - support multiple metrics per experiment (@stevenou, #260)\n\nBugfixes:\n\n - Avoiding call to params in EncapsulatedHelper (@afn, #257)\n\n## 0.7.3 (September 16th, 2014)\n\nFeatures:\n\n - Disable all split tests via a URL parameter (@hwartig, #263)\n\nBugfixes:\n\n - Correctly escape experiment names on dashboard (@ecaron, #265)\n - Handle redis connection exception error properly (@andrew, #245)\n\n## 0.7.2 (June 12th, 2014)\n\nFeatures:\n\n - Show metrics on the dashboard (@swrobel, #241)\n\nBugfixes:\n\n - Avoid nil error with ExperimentCatalog when upgrading (@danielschwartz, #253)\n - [SECURITY ISSUE] Only allow known alternatives as query param overrides (@ankane, #255)\n\n## 0.7.1 (March 20th, 2014)\n\nFeatures:\n\n - You can now reopen experiment from the dashboard (@mikezaby, #235)\n\nMisc:\n\n - Internal code tidy up (@IanVaughan, #238)\n\n## 0.7.0 (December 26th, 2013)\n\nFeatures:\n\n - Significantly improved z-score algorithm (@caser ,#221)\n - Better sorting of Experiments on dashboard (@wadako111, #218)\n\nBugfixes:\n\n - Fixed start button not being displayed in some cases (@vigosan, #219)\n\nMisc:\n\n - Experiment#initialize refactoring (@nberger, #224)\n - Extract ExperimentStore into a seperate class (@nberger, #225)\n\n## 0.6.6 (October 15th, 
2013)\n\nFeatures:\n\n - Sort experiments on Dashboard so \"active\" ones without a winner appear first (@swrobel, #204)\n - Starting tests manually (@duksis, #209)\n\nBugfixes:\n\n - Only trigger completion callback with valid Trial (@segfaultAX, #208)\n - Fixed bug with `resettable` when using `normalize_experiments` (@jonashuckestein, #213)\n\nMisc:\n\n - Added more bots to filter list (@lbeder, #214, #215, #216)\n\n## 0.6.5 (August 23, 2013)\n\nFeatures:\n\n - Added Redis adapter for persisting experiments across sessions (@fengb, #203)\n\nMisc:\n\n - Expand upon algorithms section in README (@swrobel, #200)\n\n## 0.6.4 (August 8, 2013)\n\nFeatures:\n\n - Add hooks for experiment deletion and resetting (@craigmcnamara, #198)\n - Allow Split::Helper to be used outside of a controller (@nfm, #190)\n - Show current Rails/Rack Env in dashboard (@rceee, #187)\n\nBugfixes:\n\n - Fix whiplash algorithm when using goals (@swrobel, #193)\n\nMisc:\n\n - Refactor dashboard js (@buddhamagnet)\n\n## 0.6.3 (July 8, 2013)\n\nFeatures:\n\n - Add hooks for Trial#choose! and Trial#complete! 
(@bmarini, #176)\n\nBugfixes:\n\n - Stores and parses Experiment's start_time as a UNIX integer (@joeroot, #177)\n\n## 0.6.2 (June 6, 2013)\n\nFeatures:\n\n - Rails 2.3 compatibility (@bhcarpenter, #167)\n - Adding possibility to store overridden alternative (@duksis, #173)\n\nMisc:\n\n - Now testing against multiple versions of rails\n\n## 0.6.1 (May 4, 2013)\n\nBugfixes:\n\n - Use the specified algorithm for the experiment instead of the default (@woodhull, #165)\n\nMisc:\n\n - Ensure experiements are valid when configuring (@ashmckenzie, #159)\n - Allow arrays to be passed to ab_test (@fenelon, #156)\n\n## 0.6.0 (April 4, 2013)\n\nFeatures:\n\n - Support for Ruby 2.0.0 (@phoet, #142)\n - Multiple Goals (@liujin, #109)\n - Ignoring IPs using Regular Expressions (@waynemoore, #119)\n - Added ability to add more bots to the default list (@themgt, #140)\n - Allow custom configuration of user blocking logic (@phoet , #148)\n\nBugfixes:\n\n - Fixed regression in handling of config files (@iangreenleaf, #115)\n - Fixed completion rate increases for experiments users aren't participating in (@philnash, #67)\n - Handle exceptions from invalid JSON in cookies (@iangreenleaf, #126)\n\nMisc:\n\n - updated minimum json version requirement\n - Refactor Yaml Configuration (@rtwomey, #124)\n - Refactoring of Experiments (@iangreenleaf @tamird, #117 #118)\n - Added more known Bots, including Pingdom, Bing, YandexBot (@julesie, @zinkkrysty, @dimko)\n - Improved Readme (@iangreenleaf @phoet)\n\n## 0.5.0 (January 28, 2013)\n\nFeatures:\n\n - Persistence Adapters: Cookies and Session (@patbenatar, #98)\n - Configure experiments from a hash (@iangreenleaf, #97)\n - Pluggable sampling algorithms (@woodhull, #105)\n\nBugfixes:\n\n - Fixed negative number of non-finished rates (@philnash, #83)\n - Fixed behaviour of finished(:reset => false) (@philnash, #88)\n - Only take into consideration positive z-scores (@thomasmaas, #96)\n - Amended ab_test method to raise ArgumentError if passed 
integers or symbols as\n alternatives (@buddhamagnet, #81)\n\n## 0.4.6 (October 28, 2012)\n\nFeatures:\n\n - General code quality improvements (@buddhamagnet, #79)\n\nBugfixes:\n\n - Don't increment the experiment counter if user has finished (@dimko, #78)\n - Fixed an incorrect test (@jaywengrow, #74)\n\n## 0.4.5 (August 30, 2012)\n\nBugfixes:\n\n - Fixed header gradient in FF/Opera (@philnash, #69)\n - Fixed reseting of experiment in session (@apsoto, #43)\n\n## 0.4.4 (August 9, 2012)\n\nFeatures:\n\n - Allow parameter overrides, even without Redis. (@bhcarpenter, #62)\n\nBugfixes:\n\n - Fixes version number always increasing when alternatives are changed (@philnash, #63)\n - updated guard-rspec to version 1.2\n\n## 0.4.3 (July 8, 2012)\n\nFeatures:\n\n - redis failover now recovers from all redis-related exceptions\n\n## 0.4.2 (June 1, 2012)\n\nFeatures:\n\n - Now works with v3.0 of redis gem\n\nBugfixes:\n\n - Fixed redis failover on Rubinius\n\n## 0.4.1 (April 6, 2012)\n\nFeatures:\n\n - Added configuration option to disable Split testing (@ilyakatz, #45)\n\nBugfixes:\n\n - Fix weights for existing experiments (@andreas, #40)\n - Fixed dashboard range error (@andrew, #42)\n\n## 0.4.0 (March 7, 2012)\n\n**IMPORTANT**\n\nIf using ruby 1.8.x and weighted alternatives you should always pass the control alternative through as the second argument with any other alternatives as a third argument because the order of the hash is not preserved in ruby 1.8, ruby 1.9 users are not affected by this bug.\n\nFeatures:\n\n - Experiments now record when they were started (@vrish88, #35)\n - Old versions of experiments in sessions are now cleaned up\n - Avoid users participating in multiple experiments at once (#21)\n\nBugfixes:\n\n - Overriding alternatives doesn't work for weighted alternatives (@layflags, #34)\n - confidence_level helper should handle tiny z-scores (#23)\n\n## 0.3.3 (February 16, 2012)\n\nBugfixes:\n\n - Fixed redis failover when a block was passed to 
ab_test (@layflags, #33)\n\n## 0.3.2 (February 12, 2012)\n\nFeatures:\n\n - Handle redis errors gracefully (@layflags, #32)\n\n## 0.3.1 (November 19, 2011)\n\nFeatures:\n\n - General code tidy up (@ryanlecompte, #22, @mocoso, #28)\n - Lazy loading data from Redis (@lautis, #25)\n\nBugfixes:\n\n - Handle unstarted experiments (@mocoso, #27)\n - Relaxed Sinatra version requirement (@martinclu, #24)\n\n\n## 0.3.0 (October 9, 2011)\n\nFeatures:\n\n - Redesigned dashboard (@mrappleton, #17)\n - Use atomic increments in redis for better concurrency (@lautis, #18)\n - Weighted alternatives\n\nBugfixes:\n\n - Fix to allow overriding of experiments that aren't on version 1\n\n\n## 0.2.4 (July 18, 2011)\n\nFeatures:\n\n - Added option to finished to not reset the users session\n\nBugfixes:\n\n - Only allow strings as alternatives, fixes strange errors when passing true/false or symbols\n\n## 0.2.3 (June 26, 2011)\n\nFeatures:\n\n - Experiments can now be deleted from the dashboard\n - ab_test helper now accepts a block\n - Improved dashboard\n\nBugfixes:\n\n - After resetting an experiment, existing users of that experiment will also be reset\n\n## 0.2.2 (June 11, 2011)\n\nFeatures:\n\n - Updated redis-namespace requirement to 1.0.3\n - Added a configuration object for changing options\n - Robot regex can now be changed via a configuration options\n - Added ability to ignore visits from specified IP addresses\n - Dashboard now shows percentage improvement of alternatives compared to the control\n - If the alternatives of an experiment are changed it resets the experiment and uses the new alternatives\n\nBugfixes:\n\n - Saving an experiment multiple times no longer creates duplicate alternatives\n\n## 0.2.1 (May 29, 2011)\n\nBugfixes:\n\n - Convert legacy sets to lists to avoid exceptions during upgrades from 0.1.x\n\n## 0.2.0 (May 29, 2011)\n\nFeatures:\n\n - Override an alternative via a url parameter\n - Experiments can now be reset from the dashboard\n - The first 
alternative is now considered the control\n - General dashboard usability improvements\n - Robots are ignored and given the control alternative\n\nBugfixes:\n\n - Alternatives are now store in a list rather than a set to ensure consistent ordering\n - Fixed diving by zero errors\n\n## 0.1.1 (May 18, 2011)\n\nBugfixes:\n\n - More Robust conversion rate display on dashboard\n - Ensure `Split::Version` is available everywhere, fixed dashboard\n\n## 0.1.0 (May 17, 2011)\n\nInitial Release\n\n Update CHANGELOG.md\n\n @@ -1,3 +1,23 @@\n+## Unreleased 4.0.0\n+\n+Bugfixes:\n+- ab_test must return metadata on error or if split is disabled/excluded user (@andrehjr, #622)\n+- Fix versioned experiments when used with allow_multiple_experiments=control (@andrehjr, #613)\n+- Only block Pinterest bot (@huoxito, #606)\n+- Respect experiment defaults when loading experiments in initializer. (@mattwd7, #599)\n+\n+Features:\n+- Make goals accessible via on_trial_complete callbacks (@robin-phung, #625)\n+- Replace usage of SimpleRandom with RubyStats(Used for Beta Distribution RNG) (@andrehjr, #616)\n+- Introduce enable/disable experiment cohorting (@robin-phung, #615)\n+- Add on_experiment_winner_choose callback (@GenaMinenkov, #574)\n+\n+Misc:\n+- Drop support for Ruby < 2.5 (@andrehjr, #627)\n+- Drop support for Rails < 5 (@andrehkr, #607)\n+- Bump minimum required redis to 4.2 (@andrehjr, #628)\n+- Removed repeated loading from config (@robin-phung, #619)\n+\n ## 3.4.1 (November 12th, 2019)\n \n Bugfixes:\n"},"addition_count":{"kind":"number","value":20,"string":"20"},"commit_subject":{"kind":"string","value":"Update CHANGELOG.md"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675180,"cells":{"id":{"kind":"string","value":"10070830"},"text":{"kind":"string","value":" 
CHANGELOG.md\n ## 3.4.1 (November 12th, 2019)\n\nBugfixes:\n- Bump minimum required redis to 4.2 (@andrehjr, #628)\n- Removed repeated loading from config (@robin-phung, #619)\n- Simplify RedisInterface usage when persisting Experiment alternatives (@andrehjr, #632)\n- Remove redis_url impl. Deprecated on version 2.2 (@andrehjr, #631)\n- Remove thread_safe config as redis-rb is thread_safe by default (@andrehjr, #630)\n- Fix typo of in `Split::Trial` class variable (TomasBarry, #644)\n- Single HSET to update values, instead of multiple ones (@andrehjr, #640)\n- Use Redis#hmset to keep compatibility with Redis < 4.0 (@andrehjr, #659)\n- Remove 'set' parsing for alternatives. Sets were used as storage and deprecated on 0.x (@andrehjr, #639)\n- Adding documentation related to what is stored on cookies. (@andrehjr, #634)\n- Keep railtie defined under the Split gem namespace (@avit, #666)\n- Update RSpec helper to support block syntax (@clowder, #665)\n\n## 3.4.1 (November 12th, 2019)\n\nBugfixes:\n- Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602)\n\n## 3.4.0 (November 9th, 2019)\n\nFeatures:\n- Improve DualAdapter (@santib, #588), adds a new configuration for the DualAdapter, making it possible to keep consistency for logged_out/logged_in users. It's a opt-in flag. 
No behavior was changed in this release.
(@andrehjr)\n\nMisc:\n- Update travis to add Rails 6 (@edmilton, #559)\n- Fix broken specs in developement environment (@dougpetronilio, #557)\n\n## 3.3.1 (January 11th, 2019)\n\nFeatures:\n- Filter some more bots (@janosch-x, #542)\n\nBugfixes:\n- Fix Dashboard Pagination Helper typo (@cattekin, #541)\n- Do not storage alternative in cookie if experiment has a winner (@sadhu89, #539)\n- fix user participating alternative not found (@NaturalHokke, #536)\n\nMisc:\n- Tweak RSpec instructions (@eliotsykes, #540)\n- Improve README regarding rspec usage (@vermaxik, #538)\n\n## 3.3.0 (August 13th, 2018)\n\nFeatures:\n\n- Added pagination for dashboard (@GeorgeGorbanev, #518)\n- Add Facebot crawler to list of bots (@pfeiffer, #530)\n- Ignore previewing requests (@pfeiffer, #531)\n- Fix binding of ignore_filter (@pfeiffer, #533)\n\nBugfixes:\n\n- Fix cookie header duplication (@andrehjr, #522)\n\nPerformance:\n\n- Improve performance of RedisInterface#make_list_length by using LTRIM command (@mlovic, #509)\n\nMisc:\n\n- Update development dependencies\n- test rails 5.2 on travis (@lostapathy, #524)\n- update ruby versions for travis (@lostapathy, #525)\n\n## 3.2.0 (September 21st, 2017)\n\nFeatures:\n\n- Allow configuration of how often winning alternatives are recalculated (@patbl, #501)\n\nBugfixes:\n\n- Avoid z_score numeric exception for conversion rates >1 (@cmantas, #503)\n- Fix combined experiments (@semanticart, #502)\n\n## 3.1.1 (August 30th, 2017)\n\nBugfixes:\n\n- Bring back support for ruby 1.9.3 and greater (rubygems 2.0.0 or greater now required) (@patbl, #498)\n\nMisc:\n\n- Document testing with RSpec (@eliotsykes, #495)\n\n## 3.1.0 (August 14th, 2017)\n\nFeatures:\n\n- Support for combined experiments (@daviddening, #493)\n- Rewrite CookieAdapter to work with Rack::Request and Rack::Response directly (@andrehjr, #490)\n- Enumeration of a User's Experiments that Respects the db_failover Option(@MarkRoddy, #487)\n\nBugfixes:\n\n- Blocked a few more common bot 
user agents (@kylerippey, #485)\n\nMisc:\n\n- Repository Audit by Maintainer.io (@RichardLitt, #484)\n- Update development dependencies\n- Test on ruby 2.4.1\n- Test compatibility with rails 5.1\n- Add uris to metadata section in gemspec\n\n## 3.0.0 (March 30th, 2017)\n\nFeatures:\n\n- added block randomization algorithm and specs (@hulleywood, #475)\n- Add ab_record_extra_info to allow record extra info to alternative and display on dashboard. (@tranngocsam, #460)\n\nBugfixes:\n\n- Avoid crashing on Ruby 2.4 for numeric strings (@flori, #470)\n- Fix issue where redis isn't required (@tomciopp , #466)\n\nMisc:\n\n- Avoid variable_size_secure_compare private method (@eliotsykes, #465)\n\n## 2.2.0 (November 11th, 2016)\n\n**Backwards incompatible!** Redis keys are renamed. Please make sure all running tests are completed before you upgrade, as they will reset.\n\nFeatures:\n\n- Remove dependency on Redis::Namespace (@bschaeffer, #425)\n- Make resetting on experiment change optional (@moggyboy, #430)\n- Add ability to force alternative on dashboard (@ccallebs, #437)\n\nBugfixes:\n\n- Fix variations reset across page loads for multiple=control and improve coverage (@Vasfed, #432)\n\nMisc:\n\n- Remove Explicit Return (@BradHudson, #441)\n- Update Redis config docs (@bschaeffer, #422)\n- Harden HTTP Basic snippet against timing attacks (@eliotsykes, #443)\n- Removed a couple old ruby 1.8 hacks (@andrew, #456)\n- Run tests on rails 5 (@andrew, #457)\n- Fixed a few codeclimate warnings (@andrew, #458)\n- Use codeclimate for test coverage (@andrew #455)\n\n## 2.1.0 (August 8th, 2016)\n\nFeatures:\n\n- Support REDIS_PROVIDER variable used in Heroku (@kartikluke, #426)\n\n## 2.0.0 (July 17th, 2016)\n\nBreaking changes:\n\n- Removed deprecated `finished` and `begin_experiment` methods\n- Namespaced override param to avoid potential clashes (@henrik, #398)\n\n## 1.7.0 (June 28th, 2016)\n\nFeatures:\n\n- Running concurrent experiments on same endpoint/view (@karmakaze, 
#421)\n\n## 1.6.0 (June 16th, 2016)\n\nFeatures:\n\n- Add Dual Redis(logged-in)/cookie(logged-out) persistence adapter (@karmakaze, #420)\n\n## 1.5.0 (June 8th, 2016)\n\nFeatures:\n\n- Add `expire_seconds:` TTL option to RedisAdapter (@karmakaze, #409)\n- Optional custom persistence adapter (@ndelage, #411)\n\nMisc:\n\n- Use fakeredis for testing (@andrew, #412)\n\n## 1.4.5 (June 7th, 2016)\n\nBugfixes:\n\n- FIX Negative numbers on non-finished (@divineforest, #408)\n- Eliminate extra RedisAdapter hget (@karmakaze, #407)\n- Remove unecessary code from Experiment class (@pakallis, #391, #392, #393)\n\nMisc:\n\n- Simplify Configuration#normalized_experiments (@pakallis, #395)\n- Clarify test running instructions (@henrik, #397)\n\n## 1.4.4 (May 9th, 2016)\n\nBugfixes:\n\n- Increment participation if store override is true and no experiment key exists (@spheric, #380)\n\nMisc:\n\n- Deprecated `finished` method in favour of `ab_finished` (@andreibondarev, #389)\n- Added minimum version requirement to simple-random\n- Clarify finished with first option being a hash in Readme (@henrik, #382)\n- Refactoring the User abstraction (@andreibondarev, #384)\n\n## 1.4.3 (April 28th, 2016)\n\nFeatures:\n\n- add on_trial callback whenever a trial is started (@mtyeh411, #375)\n\nBugfixes:\n\n- Allow algorithm configuration at experiment level (@007sumit, #376)\n\nMisc:\n\n- only choose override if it exists as valid alternative (@spheric, #377)\n\n## 1.4.2 (April 25th, 2016)\n\nMisc:\n\n- Deprecated some legacy methods (@andreibondarev, #374)\n\n## 1.4.1 (April 21st, 2016)\n\nBugfixes:\n\n- respect manual start configuration after an experiment has been deleted (@mtyeh411, #372)\n\nMisc:\n\n- Introduce goals collection to reduce complexity of Experiment#save (@pakallis, #365)\n- Revise specs according to http://betterspecs.org/ (@hkliya, #369)\n\n## 1.4.0 (April 2nd, 2016)\n\nFeatures:\n\n- Added experiment filters to dashboard (@ccallebs, #363, #364)\n- Added Contributor Covenant 
Code of Conduct\n\n## 1.3.2 (January 2nd, 2016)\n\nBugfixes:\n\n- Fix deleting experiments in from the updated dashboard (@craigmcnamara, #352)\n\n## 1.3.1 (January 1st, 2016)\n\nBugfixes:\n\n- Fix the dashboard for experiments with ‘/‘ in the name. (@craigmcnamara, #349)\n\n## 1.3.0 (October 20th, 2015)\n\nFeatures:\n\n - allow for custom redis_url different from ENV variable (@davidgrieser, #323)\n - add ability to change the length of the persistence cookie (@peterylai, #335)\n\nBugfixes:\n\n - Rescue from Redis::BaseError instead of Redis::CannotConnectError (@nfm, #342)\n - Fix active experiments when experiment is on a later version (@ndrisso, #331)\n - Fix caching of winning alternative (@nfm, #329)\n\nMisc:\n\n - Remove duplication from Experiment#save (@pakallis, #333)\n - Remove unnecessary argument from Experiment#write_to_alternative (@t4deu, #332)\n\n## 1.2.1 (May 17th, 2015)\n\nFeatures:\n\n - Handle redis DNS resolution failures gracefully (@fusion2004, #310)\n - Push metadata to ab_test block (@ekorneeff, #296)\n - Helper methods are now private when included in controllers (@ipoval, #303)\n\nBugfixes:\n\n - Return an empty hash as metadata when Split is disabled (@tomasdundacek, #313)\n - Don't use capture helper from ActionView (@tomasdundacek, #312)\n\nMisc:\n\n - Remove body \"max-width\" from dashboard (@xicreative, #299)\n - fix private for class methods (@ipoval, #301)\n - minor memoization fix in spec (@ipoval, #304)\n - Minor documentation fixes (#295, #297, #305, #308)\n\n## 1.2.0 (January 24th, 2015)\n\nFeatures:\n\n - Configure redis using environment variables if available (@saratovsource , #293)\n - Store metadata on experiment configuration (@dekz, #291)\n\nBugfixes:\n\n - Revert the Trial#complete! 
public API to support noargs (@dekz, #292)\n\n## 1.1.0 (January 9th, 2015)\n\nChanges:\n\n - Public class methods on `Split::Experiment` (e.g., `find_or_create`)\n have been moved to `Split::ExperimentCatalog`.\n\nFeatures:\n\n - Decouple trial from Split::Helper (@joshdover, #286)\n - Helper method for Active Experiments (@blahblahblah-, #273)\n\nMisc:\n\n - Use the new travis container based infrastructure for tests (@andrew, #280)\n\n## 1.0.0 (October 12th, 2014)\n\nChanges:\n\n - Remove support for Ruby 1.8.7 and Rails 2.3 (@qpowell, #271)\n\n## 0.8.0 (September 25th, 2014)\n\nFeatures:\n\n - Added new way to calculate the probability an alternative is the winner (@caser, #266, #251)\n - support multiple metrics per experiment (@stevenou, #260)\n\nBugfixes:\n\n - Avoiding call to params in EncapsulatedHelper (@afn, #257)\n\n## 0.7.3 (September 16th, 2014)\n\nFeatures:\n\n - Disable all split tests via a URL parameter (@hwartig, #263)\n\nBugfixes:\n\n - Correctly escape experiment names on dashboard (@ecaron, #265)\n - Handle redis connection exception error properly (@andrew, #245)\n\n## 0.7.2 (June 12th, 2014)\n\nFeatures:\n\n - Show metrics on the dashboard (@swrobel, #241)\n\nBugfixes:\n\n - Avoid nil error with ExperimentCatalog when upgrading (@danielschwartz, #253)\n - [SECURITY ISSUE] Only allow known alternatives as query param overrides (@ankane, #255)\n\n## 0.7.1 (March 20th, 2014)\n\nFeatures:\n\n - You can now reopen experiment from the dashboard (@mikezaby, #235)\n\nMisc:\n\n - Internal code tidy up (@IanVaughan, #238)\n\n## 0.7.0 (December 26th, 2013)\n\nFeatures:\n\n - Significantly improved z-score algorithm (@caser ,#221)\n - Better sorting of Experiments on dashboard (@wadako111, #218)\n\nBugfixes:\n\n - Fixed start button not being displayed in some cases (@vigosan, #219)\n\nMisc:\n\n - Experiment#initialize refactoring (@nberger, #224)\n - Extract ExperimentStore into a seperate class (@nberger, #225)\n\n## 0.6.6 (October 15th, 
2013)\n\nFeatures:\n\n - Sort experiments on Dashboard so \"active\" ones without a winner appear first (@swrobel, #204)\n - Starting tests manually (@duksis, #209)\n\nBugfixes:\n\n - Only trigger completion callback with valid Trial (@segfaultAX, #208)\n - Fixed bug with `resettable` when using `normalize_experiments` (@jonashuckestein, #213)\n\nMisc:\n\n - Added more bots to filter list (@lbeder, #214, #215, #216)\n\n## 0.6.5 (August 23, 2013)\n\nFeatures:\n\n - Added Redis adapter for persisting experiments across sessions (@fengb, #203)\n\nMisc:\n\n - Expand upon algorithms section in README (@swrobel, #200)\n\n## 0.6.4 (August 8, 2013)\n\nFeatures:\n\n - Add hooks for experiment deletion and resetting (@craigmcnamara, #198)\n - Allow Split::Helper to be used outside of a controller (@nfm, #190)\n - Show current Rails/Rack Env in dashboard (@rceee, #187)\n\nBugfixes:\n\n - Fix whiplash algorithm when using goals (@swrobel, #193)\n\nMisc:\n\n - Refactor dashboard js (@buddhamagnet)\n\n## 0.6.3 (July 8, 2013)\n\nFeatures:\n\n - Add hooks for Trial#choose! and Trial#complete! 
(@bmarini, #176)\n\nBugfixes:\n\n - Stores and parses Experiment's start_time as a UNIX integer (@joeroot, #177)\n\n## 0.6.2 (June 6, 2013)\n\nFeatures:\n\n - Rails 2.3 compatibility (@bhcarpenter, #167)\n - Adding possibility to store overridden alternative (@duksis, #173)\n\nMisc:\n\n - Now testing against multiple versions of rails\n\n## 0.6.1 (May 4, 2013)\n\nBugfixes:\n\n - Use the specified algorithm for the experiment instead of the default (@woodhull, #165)\n\nMisc:\n\n - Ensure experiements are valid when configuring (@ashmckenzie, #159)\n - Allow arrays to be passed to ab_test (@fenelon, #156)\n\n## 0.6.0 (April 4, 2013)\n\nFeatures:\n\n - Support for Ruby 2.0.0 (@phoet, #142)\n - Multiple Goals (@liujin, #109)\n - Ignoring IPs using Regular Expressions (@waynemoore, #119)\n - Added ability to add more bots to the default list (@themgt, #140)\n - Allow custom configuration of user blocking logic (@phoet , #148)\n\nBugfixes:\n\n - Fixed regression in handling of config files (@iangreenleaf, #115)\n - Fixed completion rate increases for experiments users aren't participating in (@philnash, #67)\n - Handle exceptions from invalid JSON in cookies (@iangreenleaf, #126)\n\nMisc:\n\n - updated minimum json version requirement\n - Refactor Yaml Configuration (@rtwomey, #124)\n - Refactoring of Experiments (@iangreenleaf @tamird, #117 #118)\n - Added more known Bots, including Pingdom, Bing, YandexBot (@julesie, @zinkkrysty, @dimko)\n - Improved Readme (@iangreenleaf @phoet)\n\n## 0.5.0 (January 28, 2013)\n\nFeatures:\n\n - Persistence Adapters: Cookies and Session (@patbenatar, #98)\n - Configure experiments from a hash (@iangreenleaf, #97)\n - Pluggable sampling algorithms (@woodhull, #105)\n\nBugfixes:\n\n - Fixed negative number of non-finished rates (@philnash, #83)\n - Fixed behaviour of finished(:reset => false) (@philnash, #88)\n - Only take into consideration positive z-scores (@thomasmaas, #96)\n - Amended ab_test method to raise ArgumentError if passed 
integers or symbols as\n alternatives (@buddhamagnet, #81)\n\n## 0.4.6 (October 28, 2012)\n\nFeatures:\n\n - General code quality improvements (@buddhamagnet, #79)\n\nBugfixes:\n\n - Don't increment the experiment counter if user has finished (@dimko, #78)\n - Fixed an incorrect test (@jaywengrow, #74)\n\n## 0.4.5 (August 30, 2012)\n\nBugfixes:\n\n - Fixed header gradient in FF/Opera (@philnash, #69)\n - Fixed reseting of experiment in session (@apsoto, #43)\n\n## 0.4.4 (August 9, 2012)\n\nFeatures:\n\n - Allow parameter overrides, even without Redis. (@bhcarpenter, #62)\n\nBugfixes:\n\n - Fixes version number always increasing when alternatives are changed (@philnash, #63)\n - updated guard-rspec to version 1.2\n\n## 0.4.3 (July 8, 2012)\n\nFeatures:\n\n - redis failover now recovers from all redis-related exceptions\n\n## 0.4.2 (June 1, 2012)\n\nFeatures:\n\n - Now works with v3.0 of redis gem\n\nBugfixes:\n\n - Fixed redis failover on Rubinius\n\n## 0.4.1 (April 6, 2012)\n\nFeatures:\n\n - Added configuration option to disable Split testing (@ilyakatz, #45)\n\nBugfixes:\n\n - Fix weights for existing experiments (@andreas, #40)\n - Fixed dashboard range error (@andrew, #42)\n\n## 0.4.0 (March 7, 2012)\n\n**IMPORTANT**\n\nIf using ruby 1.8.x and weighted alternatives you should always pass the control alternative through as the second argument with any other alternatives as a third argument because the order of the hash is not preserved in ruby 1.8, ruby 1.9 users are not affected by this bug.\n\nFeatures:\n\n - Experiments now record when they were started (@vrish88, #35)\n - Old versions of experiments in sessions are now cleaned up\n - Avoid users participating in multiple experiments at once (#21)\n\nBugfixes:\n\n - Overriding alternatives doesn't work for weighted alternatives (@layflags, #34)\n - confidence_level helper should handle tiny z-scores (#23)\n\n## 0.3.3 (February 16, 2012)\n\nBugfixes:\n\n - Fixed redis failover when a block was passed to 
ab_test (@layflags, #33)\n\n## 0.3.2 (February 12, 2012)\n\nFeatures:\n\n - Handle redis errors gracefully (@layflags, #32)\n\n## 0.3.1 (November 19, 2011)\n\nFeatures:\n\n - General code tidy up (@ryanlecompte, #22, @mocoso, #28)\n - Lazy loading data from Redis (@lautis, #25)\n\nBugfixes:\n\n - Handle unstarted experiments (@mocoso, #27)\n - Relaxed Sinatra version requirement (@martinclu, #24)\n\n\n## 0.3.0 (October 9, 2011)\n\nFeatures:\n\n - Redesigned dashboard (@mrappleton, #17)\n - Use atomic increments in redis for better concurrency (@lautis, #18)\n - Weighted alternatives\n\nBugfixes:\n\n - Fix to allow overriding of experiments that aren't on version 1\n\n\n## 0.2.4 (July 18, 2011)\n\nFeatures:\n\n - Added option to finished to not reset the users session\n\nBugfixes:\n\n - Only allow strings as alternatives, fixes strange errors when passing true/false or symbols\n\n## 0.2.3 (June 26, 2011)\n\nFeatures:\n\n - Experiments can now be deleted from the dashboard\n - ab_test helper now accepts a block\n - Improved dashboard\n\nBugfixes:\n\n - After resetting an experiment, existing users of that experiment will also be reset\n\n## 0.2.2 (June 11, 2011)\n\nFeatures:\n\n - Updated redis-namespace requirement to 1.0.3\n - Added a configuration object for changing options\n - Robot regex can now be changed via a configuration options\n - Added ability to ignore visits from specified IP addresses\n - Dashboard now shows percentage improvement of alternatives compared to the control\n - If the alternatives of an experiment are changed it resets the experiment and uses the new alternatives\n\nBugfixes:\n\n - Saving an experiment multiple times no longer creates duplicate alternatives\n\n## 0.2.1 (May 29, 2011)\n\nBugfixes:\n\n - Convert legacy sets to lists to avoid exceptions during upgrades from 0.1.x\n\n## 0.2.0 (May 29, 2011)\n\nFeatures:\n\n - Override an alternative via a url parameter\n - Experiments can now be reset from the dashboard\n - The first 
alternative is now considered the control\n - General dashboard usability improvements\n - Robots are ignored and given the control alternative\n\nBugfixes:\n\n - Alternatives are now store in a list rather than a set to ensure consistent ordering\n - Fixed diving by zero errors\n\n## 0.1.1 (May 18, 2011)\n\nBugfixes:\n\n - More Robust conversion rate display on dashboard\n - Ensure `Split::Version` is available everywhere, fixed dashboard\n\n## 0.1.0 (May 17, 2011)\n\nInitial Release\n\n Update CHANGELOG.md\n\n @@ -1,3 +1,23 @@\n+## Unreleased 4.0.0\n+\n+Bugfixes:\n+- ab_test must return metadata on error or if split is disabled/excluded user (@andrehjr, #622)\n+- Fix versioned experiments when used with allow_multiple_experiments=control (@andrehjr, #613)\n+- Only block Pinterest bot (@huoxito, #606)\n+- Respect experiment defaults when loading experiments in initializer. (@mattwd7, #599)\n+\n+Features:\n+- Make goals accessible via on_trial_complete callbacks (@robin-phung, #625)\n+- Replace usage of SimpleRandom with RubyStats(Used for Beta Distribution RNG) (@andrehjr, #616)\n+- Introduce enable/disable experiment cohorting (@robin-phung, #615)\n+- Add on_experiment_winner_choose callback (@GenaMinenkov, #574)\n+\n+Misc:\n+- Drop support for Ruby < 2.5 (@andrehjr, #627)\n+- Drop support for Rails < 5 (@andrehkr, #607)\n+- Bump minimum required redis to 4.2 (@andrehjr, #628)\n+- Removed repeated loading from config (@robin-phung, #619)\n+\n ## 3.4.1 (November 12th, 2019)\n \n Bugfixes:\n"},"addition_count":{"kind":"number","value":20,"string":"20"},"commit_subject":{"kind":"string","value":"Update CHANGELOG.md"},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675181,"cells":{"id":{"kind":"string","value":"10070831"},"text":{"kind":"string","value":" 
jquery.meow.js\n (function ($, window) {\n 'use strict';\n // Meow queue\n var default_meow_area,\n meows = {\n queue: {},\n add: function (meow) {\n this.queue[meow.timestamp] = meow;\n },\n get: function (timestamp) {\n return this.queue[timestamp];\n },\n remove: function (timestamp) {\n delete this.queue[timestamp];\n },\n size: function () {\n var timestamp,\n size = 0;\n for (timestamp in this.queue) {\n if (this.queue.hasOwnProperty(timestamp)) { size += 1; }\n }\n return size;\n }\n },\n // Meow constructor\n Meow = function (options) {\n var that = this;\n\n this.timestamp = new Date().getTime(); // used to identify this meow and timeout\n this.hovered = false; // whether mouse is over or not\n\n if (typeof default_meow_area === 'undefined'\n this.message = options.message;\n this.icon = options.icon;\n this.timestamp = Date.now();\n this.duration = 2400;\n this.hovered = false;\n this.manifest = {};\n $('#meows').append($(document.createElement('div'))\n .attr('id', 'meow-' + this.timestamp)\n .addClass('meow')\n .html($(document.createElement('div')).addClass('inner').text(this.message))\n .hide()\n .fadeIn(400));\n\n this.container = $(options.container);\n } else {\n this.container = default_meow_area;\n }\n\n\n if (typeof options.title === 'string') {\n this.title = options.title;\n }\n\n if (typeof options.message === 'string') {\n this.message = options.message;\n } else if (options.message instanceof $) {\n if (options.message.is('input,textarea,select')) {\n this.message = options.message.val();\n } else {\n this.message = options.message.text();\n }\n\n if (typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') {\n this.title = options.message.attr('title');\n }\n }\n\n if (typeof options.icon === 'string') {\n this.icon = options.icon;\n }\n if (options.sticky) {\n this.duration = Infinity;\n } else {\n this.duration = options.duration || 5000;\n }\n\n // Call callback if it's defined (this = meow object)\n if 
(typeof options.beforeCreate === 'function') {\n options.beforeCreate.call(that);\n }\n\n // Add the meow to the meow area\n this.container.append($(window.document.createElement('div'))\n .attr('id', 'meow-' + this.timestamp.toString())\n .addClass('meow')\n .html($(window.document.createElement('div')).addClass('inner').html(this.message))\n .hide()\n .fadeIn(400));\n\n this.manifest = $('#meow-' + this.timestamp.toString());\n\n title,\n message,\n icon,\n message_type;\n\n if (typeof options.title === 'string') {\n title = options.title;\n if (typeof that.icon === 'string') {\n this.manifest.find('.inner').prepend(\n $(window.document.createElement('div')).addClass('icon').html(\n $(window.document.createElement('img')).attr('src', this.icon)\n )\n );\n }\n\n // Add close button if the meow isn't uncloseable\n // TODO: this close button needs to be much prettier\n if (options.closeable !== false) {\n this.manifest.find('.inner').prepend(\n $(window.document.createElement('a'))\n .addClass('close')\n .html('&times;')\n .attr('href', '#close-meow-' + that.timestamp)\n .click(function (e) {\n e.preventDefault();\n that.destroy();\n })\n );\n }\n\n this.manifest.bind('mouseenter mouseleave', function (event) {\n if (typeof options.icon === 'string') {\n icon = options.icon;\n }\n return {\n trigger: trigger,\n message: message,\n icon: icon,\n message_type: message_type\n }\n },\n this.timeout = window.setTimeout(function () {\n // Make sure this meow hasn't already been destroyed\n if (typeof meows.get(that.timestamp) !== 'undefined') {\n // Call callback if it's defined (this = meow DOM element)\n if (typeof options.onTimeout === 'function') {\n options.onTimeout.call(that.manifest);\n }\n // Don't destroy if user is hovering over meow\n if (that.hovered !== true && typeof that === 'object') {\n that.destroy();\n }\n }\n }, that.duration);\n }\n\n this.destroy = function () {\n if (that.destroyed !== true) {\n // Call callback if it's defined (this = meow DOM 
element)\n if (typeof options.beforeDestroy === 'function') {\n options.beforeDestroy.call(that.manifest);\n }\n that.manifest.find('.inner').fadeTo(400, 0, function () {\n that.manifest.slideUp(function () {\n that.manifest.remove();\n that.destroyed = true;\n meows.remove(that.timestamp);\n if (typeof options.afterDestroy === 'function') {\n options.afterDestroy.call(null);\n }\n if (meows.size() <= 0) {\n if (default_meow_area instanceof $) {\n default_meow_area.remove();\n default_meow_area = undefined;\n }\n if (typeof options.afterDestroyLast === 'function') {\n options.afterDestroyLast.call(null);\n }\n }\n });\n });\n }\n };\n };\n\n $.fn.meow = function (args) {\n var meow = new Meow(args);\n meows.add(meow);\n return meow;\n };\n $.meow = $.fn.meow;\n}(jQuery, window));\n\n configurable duration, correcting title parameter, allowing use of html inside box\n\n @@ -33,13 +33,13 @@\n this.message = options.message;\n this.icon = options.icon;\n this.timestamp = Date.now();\n- this.duration = 2400;\n+ this.duration = options.duration || 2400;\n this.hovered = false;\n this.manifest = {};\n $('#meows').append($(document.createElement('div'))\n .attr('id', 'meow-' + this.timestamp)\n .addClass('meow')\n- .html($(document.createElement('div')).addClass('inner').text(this.message))\n+ .html($(document.createElement('div')).addClass('inner').html(this.message))\n .hide()\n .fadeIn(400));\n \n@@ -94,7 +94,8 @@\n title,\n message,\n icon,\n- message_type;\n+ message_type,\n+ duration;\n \n if (typeof options.title === 'string') {\n title = options.title;\n@@ -125,10 +126,15 @@\n if (typeof options.icon === 'string') {\n icon = options.icon;\n }\n+\n+ duration = options.duration;\n+\n return {\n trigger: trigger,\n message: message,\n icon: icon,\n+ title: title,\n+ duration: duration,\n message_type: message_type\n }\n },\n"},"addition_count":{"kind":"number","value":9,"string":"9"},"commit_subject":{"kind":"string","value":"configurable duration, correcting title 
parameter, allowing use of html inside box"},"deletion_count":{"kind":"number","value":3,"string":"3"},"file_extension":{"kind":"string","value":".js"},"lang":{"kind":"string","value":"meow"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"zacstewart/Meow"}}},{"rowIdx":10675182,"cells":{"id":{"kind":"string","value":"10070832"},"text":{"kind":"string","value":" jquery.meow.js\n (function ($, window) {\n 'use strict';\n // Meow queue\n var default_meow_area,\n meows = {\n queue: {},\n add: function (meow) {\n this.queue[meow.timestamp] = meow;\n },\n get: function (timestamp) {\n return this.queue[timestamp];\n },\n remove: function (timestamp) {\n delete this.queue[timestamp];\n },\n size: function () {\n var timestamp,\n size = 0;\n for (timestamp in this.queue) {\n if (this.queue.hasOwnProperty(timestamp)) { size += 1; }\n }\n return size;\n }\n },\n // Meow constructor\n Meow = function (options) {\n var that = this;\n\n this.timestamp = new Date().getTime(); // used to identify this meow and timeout\n this.hovered = false; // whether mouse is over or not\n\n if (typeof default_meow_area === 'undefined'\n this.message = options.message;\n this.icon = options.icon;\n this.timestamp = Date.now();\n this.duration = 2400;\n this.hovered = false;\n this.manifest = {};\n $('#meows').append($(document.createElement('div'))\n .attr('id', 'meow-' + this.timestamp)\n .addClass('meow')\n .html($(document.createElement('div')).addClass('inner').text(this.message))\n .hide()\n .fadeIn(400));\n\n this.container = $(options.container);\n } else {\n this.container = default_meow_area;\n }\n\n\n if (typeof options.title === 'string') {\n this.title = options.title;\n }\n\n if (typeof options.message === 'string') {\n this.message = options.message;\n } else if (options.message instanceof $) {\n if (options.message.is('input,textarea,select')) {\n this.message = options.message.val();\n } else {\n this.message = options.message.text();\n }\n\n if 
(typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') {\n this.title = options.message.attr('title');\n }\n }\n\n if (typeof options.icon === 'string') {\n this.icon = options.icon;\n }\n if (options.sticky) {\n this.duration = Infinity;\n } else {\n this.duration = options.duration || 5000;\n }\n\n // Call callback if it's defined (this = meow object)\n if (typeof options.beforeCreate === 'function') {\n options.beforeCreate.call(that);\n }\n\n // Add the meow to the meow area\n this.container.append($(window.document.createElement('div'))\n .attr('id', 'meow-' + this.timestamp.toString())\n .addClass('meow')\n .html($(window.document.createElement('div')).addClass('inner').html(this.message))\n .hide()\n .fadeIn(400));\n\n this.manifest = $('#meow-' + this.timestamp.toString());\n\n title,\n message,\n icon,\n message_type;\n\n if (typeof options.title === 'string') {\n title = options.title;\n if (typeof that.icon === 'string') {\n this.manifest.find('.inner').prepend(\n $(window.document.createElement('div')).addClass('icon').html(\n $(window.document.createElement('img')).attr('src', this.icon)\n )\n );\n }\n\n // Add close button if the meow isn't uncloseable\n // TODO: this close button needs to be much prettier\n if (options.closeable !== false) {\n this.manifest.find('.inner').prepend(\n $(window.document.createElement('a'))\n .addClass('close')\n .html('&times;')\n .attr('href', '#close-meow-' + that.timestamp)\n .click(function (e) {\n e.preventDefault();\n that.destroy();\n })\n );\n }\n\n this.manifest.bind('mouseenter mouseleave', function (event) {\n if (typeof options.icon === 'string') {\n icon = options.icon;\n }\n return {\n trigger: trigger,\n message: message,\n icon: icon,\n message_type: message_type\n }\n },\n this.timeout = window.setTimeout(function () {\n // Make sure this meow hasn't already been destroyed\n if (typeof meows.get(that.timestamp) !== 'undefined') {\n // Call callback if it's defined (this 
= meow DOM element)\n if (typeof options.onTimeout === 'function') {\n options.onTimeout.call(that.manifest);\n }\n // Don't destroy if user is hovering over meow\n if (that.hovered !== true && typeof that === 'object') {\n that.destroy();\n }\n }\n }, that.duration);\n }\n\n this.destroy = function () {\n if (that.destroyed !== true) {\n // Call callback if it's defined (this = meow DOM element)\n if (typeof options.beforeDestroy === 'function') {\n options.beforeDestroy.call(that.manifest);\n }\n that.manifest.find('.inner').fadeTo(400, 0, function () {\n that.manifest.slideUp(function () {\n that.manifest.remove();\n that.destroyed = true;\n meows.remove(that.timestamp);\n if (typeof options.afterDestroy === 'function') {\n options.afterDestroy.call(null);\n }\n if (meows.size() <= 0) {\n if (default_meow_area instanceof $) {\n default_meow_area.remove();\n default_meow_area = undefined;\n }\n if (typeof options.afterDestroyLast === 'function') {\n options.afterDestroyLast.call(null);\n }\n }\n });\n });\n }\n };\n };\n\n $.fn.meow = function (args) {\n var meow = new Meow(args);\n meows.add(meow);\n return meow;\n };\n $.meow = $.fn.meow;\n}(jQuery, window));\n\n configurable duration, correcting title parameter, allowing use of html inside box\n\n @@ -33,13 +33,13 @@\n this.message = options.message;\n this.icon = options.icon;\n this.timestamp = Date.now();\n- this.duration = 2400;\n+ this.duration = options.duration || 2400;\n this.hovered = false;\n this.manifest = {};\n $('#meows').append($(document.createElement('div'))\n .attr('id', 'meow-' + this.timestamp)\n .addClass('meow')\n- .html($(document.createElement('div')).addClass('inner').text(this.message))\n+ .html($(document.createElement('div')).addClass('inner').html(this.message))\n .hide()\n .fadeIn(400));\n \n@@ -94,7 +94,8 @@\n title,\n message,\n icon,\n- message_type;\n+ message_type,\n+ duration;\n \n if (typeof options.title === 'string') {\n title = options.title;\n@@ -125,10 +126,15 @@\n if 
(typeof options.icon === 'string') {\n icon = options.icon;\n }\n+\n+ duration = options.duration;\n+\n return {\n trigger: trigger,\n message: message,\n icon: icon,\n+ title: title,\n+ duration: duration,\n message_type: message_type\n }\n },\n"},"addition_count":{"kind":"number","value":9,"string":"9"},"commit_subject":{"kind":"string","value":"configurable duration, correcting title parameter, allowing use of html inside box"},"deletion_count":{"kind":"number","value":3,"string":"3"},"file_extension":{"kind":"string","value":".js"},"lang":{"kind":"string","value":"meow"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"zacstewart/Meow"}}},{"rowIdx":10675183,"cells":{"id":{"kind":"string","value":"10070833"},"text":{"kind":"string","value":" user_spec.rb\n require 'spec_helper'\nrequire 'split/experiment_catalog'\nrequire 'split/experiment'\nrequire \"split/user\"\n\ndescribe Split::User do\n let(:user_keys) { { \"link_color\" => \"blue\" } }\n let(:context) { double(session: { split: user_keys }) }\n let(:experiment) { Split::Experiment.new(\"link_color\") }\n\n before(:each) do\n @subject = described_class.new(context)\n end\n\n it \"delegates methods correctly\" do\n expect(@subject[\"link_color\"]).to eq(@subject.user[\"link_color\"])\n end\n\n context \"#cleanup_old_versions!\" do\n let(:experiment_version) { \"#{experiment.name}:1\" }\n let(:second_experiment_version) { \"#{experiment.name}_another:1\" }\n let(:third_experiment_version) { \"variation_of_#{experiment.name}:1\" }\n let(:user_keys) do\n {\n experiment_version => \"blue\",\n second_experiment_version => \"red\",\n third_experiment_version => \"yellow\"\n }\n end\n\n before(:each) { @subject.cleanup_old_versions!(experiment) }\n\n it \"removes key if old experiment is found\" do\n expect(@subject.keys).not_to include(experiment_version)\n end\n\n it 'does not remove other keys' do\n expect(@subject.keys).to include(second_experiment_version, 
third_experiment_version)\n end\n end \n\n context '#cleanup_old_experiments!' do\n it 'removes key if experiment is not found' do\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n it \"removes key if experiment has a winner\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(experiment).to receive(:start_time).and_return(Date.today)\n allow(experiment).to receive(:has_winner?).and_return(true)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n it \"removes key if experiment has not started yet\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(experiment).to receive(:has_winner?).and_return(false)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n context \"with finished key\" do\n let(:user_keys) { { \"link_color\" => \"blue\", \"link_color:finished\" => true } }\n\n it \"does not remove finished key for experiment without a winner\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color:finished\").and_return(nil)\n allow(experiment).to receive(:start_time).and_return(Date.today)\n allow(experiment).to receive(:has_winner?).and_return(false)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to include(\"link_color\")\n expect(@subject.keys).to include(\"link_color:finished\")\n end\n end\n\n context \"when already cleaned up\" do\n before do\n @subject.cleanup_old_experiments!\n end\n\n it \"does not clean up again\" do\n expect(@subject).to_not receive(:keys_without_finished)\n @subject.cleanup_old_experiments!\n end\n end\n end\n\n context \"allows user to be loaded from adapter\" do\n it \"loads user from adapter (RedisAdapter)\" do\n user = Split::Persistence::RedisAdapter.new(nil, 112233)\n user[\"foo\"] = \"bar\"\n\n ab_user = 
Split::User.find(112233, :redis)\n\n expect(ab_user[\"foo\"]).to eql(\"bar\")\n end\n\n it \"returns nil if adapter does not implement a finder method\" do\n ab_user = Split::User.find(112233, :dual_adapter)\n expect(ab_user).to be_nil\n end\n\n end\n\n context \"instantiated with custom adapter\" do\n let(:custom_adapter) { double(:persistence_adapter) }\n\n before do\n @subject = described_class.new(context, custom_adapter)\n end\n\n it \"sets user to the custom adapter\" do\n expect(@subject.user).to eq(custom_adapter)\n end\n end\n\nend\n\n Merge pull request #679 from splitrb/fix-layout-offenses\n\nFix all Layout issues on the project\n @@ -1,3 +1,5 @@\n+# frozen_string_literal: true\n+\n require 'spec_helper'\n require 'split/experiment_catalog'\n require 'split/experiment'\n@@ -37,7 +39,7 @@ describe Split::User do\n it 'does not remove other keys' do\n expect(@subject.keys).to include(second_experiment_version, third_experiment_version)\n end\n- end \n+ end\n \n context '#cleanup_old_experiments!' 
do\n it 'removes key if experiment is not found' do\n@@ -100,7 +102,6 @@ describe Split::User do\n ab_user = Split::User.find(112233, :dual_adapter)\n expect(ab_user).to be_nil\n end\n-\n end\n \n context \"instantiated with custom adapter\" do\n@@ -114,5 +115,4 @@ describe Split::User do\n expect(@subject.user).to eq(custom_adapter)\n end\n end\n-\n end\n"},"addition_count":{"kind":"number","value":3,"string":"3"},"commit_subject":{"kind":"string","value":"Merge pull request #679 from splitrb/fix-layout-offenses"},"deletion_count":{"kind":"number","value":3,"string":"3"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675184,"cells":{"id":{"kind":"string","value":"10070834"},"text":{"kind":"string","value":" user_spec.rb\n require 'spec_helper'\nrequire 'split/experiment_catalog'\nrequire 'split/experiment'\nrequire \"split/user\"\n\ndescribe Split::User do\n let(:user_keys) { { \"link_color\" => \"blue\" } }\n let(:context) { double(session: { split: user_keys }) }\n let(:experiment) { Split::Experiment.new(\"link_color\") }\n\n before(:each) do\n @subject = described_class.new(context)\n end\n\n it \"delegates methods correctly\" do\n expect(@subject[\"link_color\"]).to eq(@subject.user[\"link_color\"])\n end\n\n context \"#cleanup_old_versions!\" do\n let(:experiment_version) { \"#{experiment.name}:1\" }\n let(:second_experiment_version) { \"#{experiment.name}_another:1\" }\n let(:third_experiment_version) { \"variation_of_#{experiment.name}:1\" }\n let(:user_keys) do\n {\n experiment_version => \"blue\",\n second_experiment_version => \"red\",\n third_experiment_version => \"yellow\"\n }\n end\n\n before(:each) { @subject.cleanup_old_versions!(experiment) }\n\n it \"removes key if old experiment is found\" do\n expect(@subject.keys).not_to include(experiment_version)\n end\n\n it 'does not remove other keys' do\n 
expect(@subject.keys).to include(second_experiment_version, third_experiment_version)\n end\n end \n\n context '#cleanup_old_experiments!' do\n it 'removes key if experiment is not found' do\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n it \"removes key if experiment has a winner\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(experiment).to receive(:start_time).and_return(Date.today)\n allow(experiment).to receive(:has_winner?).and_return(true)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n it \"removes key if experiment has not started yet\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(experiment).to receive(:has_winner?).and_return(false)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n context \"with finished key\" do\n let(:user_keys) { { \"link_color\" => \"blue\", \"link_color:finished\" => true } }\n\n it \"does not remove finished key for experiment without a winner\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color:finished\").and_return(nil)\n allow(experiment).to receive(:start_time).and_return(Date.today)\n allow(experiment).to receive(:has_winner?).and_return(false)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to include(\"link_color\")\n expect(@subject.keys).to include(\"link_color:finished\")\n end\n end\n\n context \"when already cleaned up\" do\n before do\n @subject.cleanup_old_experiments!\n end\n\n it \"does not clean up again\" do\n expect(@subject).to_not receive(:keys_without_finished)\n @subject.cleanup_old_experiments!\n end\n end\n end\n\n context \"allows user to be loaded from adapter\" do\n it \"loads user from adapter (RedisAdapter)\" do\n user = 
Split::Persistence::RedisAdapter.new(nil, 112233)\n user[\"foo\"] = \"bar\"\n\n ab_user = Split::User.find(112233, :redis)\n\n expect(ab_user[\"foo\"]).to eql(\"bar\")\n end\n\n it \"returns nil if adapter does not implement a finder method\" do\n ab_user = Split::User.find(112233, :dual_adapter)\n expect(ab_user).to be_nil\n end\n\n end\n\n context \"instantiated with custom adapter\" do\n let(:custom_adapter) { double(:persistence_adapter) }\n\n before do\n @subject = described_class.new(context, custom_adapter)\n end\n\n it \"sets user to the custom adapter\" do\n expect(@subject.user).to eq(custom_adapter)\n end\n end\n\nend\n\n Merge pull request #679 from splitrb/fix-layout-offenses\n\nFix all Layout issues on the project\n @@ -1,3 +1,5 @@\n+# frozen_string_literal: true\n+\n require 'spec_helper'\n require 'split/experiment_catalog'\n require 'split/experiment'\n@@ -37,7 +39,7 @@ describe Split::User do\n it 'does not remove other keys' do\n expect(@subject.keys).to include(second_experiment_version, third_experiment_version)\n end\n- end \n+ end\n \n context '#cleanup_old_experiments!' 
do\n it 'removes key if experiment is not found' do\n@@ -100,7 +102,6 @@ describe Split::User do\n ab_user = Split::User.find(112233, :dual_adapter)\n expect(ab_user).to be_nil\n end\n-\n end\n \n context \"instantiated with custom adapter\" do\n@@ -114,5 +115,4 @@ describe Split::User do\n expect(@subject.user).to eq(custom_adapter)\n end\n end\n-\n end\n"},"addition_count":{"kind":"number","value":3,"string":"3"},"commit_subject":{"kind":"string","value":"Merge pull request #679 from splitrb/fix-layout-offenses"},"deletion_count":{"kind":"number","value":3,"string":"3"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675185,"cells":{"id":{"kind":"string","value":"10070835"},"text":{"kind":"string","value":" user_spec.rb\n require 'spec_helper'\nrequire 'split/experiment_catalog'\nrequire 'split/experiment'\nrequire \"split/user\"\n\ndescribe Split::User do\n let(:user_keys) { { \"link_color\" => \"blue\" } }\n let(:context) { double(session: { split: user_keys }) }\n let(:experiment) { Split::Experiment.new(\"link_color\") }\n\n before(:each) do\n @subject = described_class.new(context)\n end\n\n it \"delegates methods correctly\" do\n expect(@subject[\"link_color\"]).to eq(@subject.user[\"link_color\"])\n end\n\n context \"#cleanup_old_versions!\" do\n let(:experiment_version) { \"#{experiment.name}:1\" }\n let(:second_experiment_version) { \"#{experiment.name}_another:1\" }\n let(:third_experiment_version) { \"variation_of_#{experiment.name}:1\" }\n let(:user_keys) do\n {\n experiment_version => \"blue\",\n second_experiment_version => \"red\",\n third_experiment_version => \"yellow\"\n }\n end\n\n before(:each) { @subject.cleanup_old_versions!(experiment) }\n\n it \"removes key if old experiment is found\" do\n expect(@subject.keys).not_to include(experiment_version)\n end\n\n it 'does not remove other keys' do\n 
expect(@subject.keys).to include(second_experiment_version, third_experiment_version)\n end\n end \n\n context '#cleanup_old_experiments!' do\n it 'removes key if experiment is not found' do\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n it \"removes key if experiment has a winner\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(experiment).to receive(:start_time).and_return(Date.today)\n allow(experiment).to receive(:has_winner?).and_return(true)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n it \"removes key if experiment has not started yet\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(experiment).to receive(:has_winner?).and_return(false)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n context \"with finished key\" do\n let(:user_keys) { { \"link_color\" => \"blue\", \"link_color:finished\" => true } }\n\n it \"does not remove finished key for experiment without a winner\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color:finished\").and_return(nil)\n allow(experiment).to receive(:start_time).and_return(Date.today)\n allow(experiment).to receive(:has_winner?).and_return(false)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to include(\"link_color\")\n expect(@subject.keys).to include(\"link_color:finished\")\n end\n end\n\n context \"when already cleaned up\" do\n before do\n @subject.cleanup_old_experiments!\n end\n\n it \"does not clean up again\" do\n expect(@subject).to_not receive(:keys_without_finished)\n @subject.cleanup_old_experiments!\n end\n end\n end\n\n context \"allows user to be loaded from adapter\" do\n it \"loads user from adapter (RedisAdapter)\" do\n user = 
Split::Persistence::RedisAdapter.new(nil, 112233)\n user[\"foo\"] = \"bar\"\n\n ab_user = Split::User.find(112233, :redis)\n\n expect(ab_user[\"foo\"]).to eql(\"bar\")\n end\n\n it \"returns nil if adapter does not implement a finder method\" do\n ab_user = Split::User.find(112233, :dual_adapter)\n expect(ab_user).to be_nil\n end\n\n end\n\n context \"instantiated with custom adapter\" do\n let(:custom_adapter) { double(:persistence_adapter) }\n\n before do\n @subject = described_class.new(context, custom_adapter)\n end\n\n it \"sets user to the custom adapter\" do\n expect(@subject.user).to eq(custom_adapter)\n end\n end\n\nend\n\n Merge pull request #679 from splitrb/fix-layout-offenses\n\nFix all Layout issues on the project\n @@ -1,3 +1,5 @@\n+# frozen_string_literal: true\n+\n require 'spec_helper'\n require 'split/experiment_catalog'\n require 'split/experiment'\n@@ -37,7 +39,7 @@ describe Split::User do\n it 'does not remove other keys' do\n expect(@subject.keys).to include(second_experiment_version, third_experiment_version)\n end\n- end \n+ end\n \n context '#cleanup_old_experiments!' 
do\n it 'removes key if experiment is not found' do\n@@ -100,7 +102,6 @@ describe Split::User do\n ab_user = Split::User.find(112233, :dual_adapter)\n expect(ab_user).to be_nil\n end\n-\n end\n \n context \"instantiated with custom adapter\" do\n@@ -114,5 +115,4 @@ describe Split::User do\n expect(@subject.user).to eq(custom_adapter)\n end\n end\n-\n end\n"},"addition_count":{"kind":"number","value":3,"string":"3"},"commit_subject":{"kind":"string","value":"Merge pull request #679 from splitrb/fix-layout-offenses"},"deletion_count":{"kind":"number","value":3,"string":"3"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675186,"cells":{"id":{"kind":"string","value":"10070836"},"text":{"kind":"string","value":" user_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"split/experiment_catalog\"\n\ndescribe Split::User do\n let(:user_keys) { { 'link_color' => 'blue' } }\n let(:context) { double(:session => { split: user_keys }) }\n let(:experiment) { Split::Experiment.new('link_color') }\n\n before(:each) do\n\n before(:each) do\n @subject = described_class.new(context)\n end\n\n it \"delegates methods correctly\" do\n expect(@subject[\"link_color\"]).to eq(@subject.user[\"link_color\"])\n end\n\n context \"#cleanup_old_versions!\" do\n let(:experiment_version) { \"#{experiment.name}:1\" }\n let(:second_experiment_version) { \"#{experiment.name}_another:1\" }\n let(:third_experiment_version) { \"variation_of_#{experiment.name}:1\" }\n let(:user_keys) do\n {\n experiment_version => \"blue\",\n second_experiment_version => \"red\",\n third_experiment_version => \"yellow\"\n }\n end\n\n before(:each) { @subject.cleanup_old_versions!(experiment) }\n\n it \"removes key if old experiment is found\" do\n expect(@subject.keys).not_to include(experiment_version)\n end\n\n it \"does not remove other keys\" 
do\n expect(@subject.keys).to include(second_experiment_version, third_experiment_version)\n end\n end\n\n context \"#cleanup_old_experiments!\" do\n it \"removes key if experiment is not found\" do\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n it \"removes key if experiment has a winner\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(experiment).to receive(:start_time).and_return(Date.today)\n allow(experiment).to receive(:has_winner?).and_return(true)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n it \"removes key if experiment has not started yet\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(experiment).to receive(:has_winner?).and_return(false)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n context \"with finished key\" do\n let(:user_keys) { { \"link_color\" => \"blue\", \"link_color:finished\" => true } }\n\n it \"does not remove finished key for experiment without a winner\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color:finished\").and_return(nil)\n allow(experiment).to receive(:start_time).and_return(Date.today)\n allow(experiment).to receive(:has_winner?).and_return(false)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to include(\"link_color\")\n expect(@subject.keys).to include(\"link_color:finished\")\n end\n end\n\n context \"when already cleaned up\" do\n before do\n @subject.cleanup_old_experiments!\n end\n\n it \"does not clean up again\" do\n expect(@subject).to_not receive(:keys_without_finished)\n @subject.cleanup_old_experiments!\n end\n end\n end\n\n context \"allows user to be loaded from adapter\" do\n it \"loads user from adapter (RedisAdapter)\" do\n user = 
Split::Persistence::RedisAdapter.new(nil, 112233)\n user[\"foo\"] = \"bar\"\n\n ab_user = Split::User.find(112233, :redis)\n\n expect(ab_user[\"foo\"]).to eql(\"bar\")\n end\n\n it \"returns nil if adapter does not implement a finder method\" do\n ab_user = Split::User.find(112233, :dual_adapter)\n expect(ab_user).to be_nil\n end\n end\n\n context \"instantiated with custom adapter\" do\n let(:custom_adapter) { double(:persistence_adapter) }\n\n before do\n @subject = described_class.new(context, custom_adapter)\n end\n\n it \"sets user to the custom adapter\" do\n expect(@subject.user).to eq(custom_adapter)\n end\n end\nend\n\n Merge pull request #678 from splitrb/fix-hash-syntax-offenses\n\nFix Style/HashSyntax offenses\n @@ -5,7 +5,7 @@ require 'split/user'\n \n describe Split::User do\n let(:user_keys) { { 'link_color' => 'blue' } }\n- let(:context) { double(:session => { split: user_keys }) }\n+ let(:context) { double(session: { split: user_keys }) }\n let(:experiment) { Split::Experiment.new('link_color') }\n \n before(:each) do\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #678 from splitrb/fix-hash-syntax-offenses"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675187,"cells":{"id":{"kind":"string","value":"10070837"},"text":{"kind":"string","value":" user_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"split/experiment_catalog\"\n\ndescribe Split::User do\n let(:user_keys) { { 'link_color' => 'blue' } }\n let(:context) { double(:session => { split: user_keys }) }\n let(:experiment) { Split::Experiment.new('link_color') }\n\n before(:each) do\n\n before(:each) do\n @subject = described_class.new(context)\n end\n\n it \"delegates methods 
correctly\" do\n expect(@subject[\"link_color\"]).to eq(@subject.user[\"link_color\"])\n end\n\n context \"#cleanup_old_versions!\" do\n let(:experiment_version) { \"#{experiment.name}:1\" }\n let(:second_experiment_version) { \"#{experiment.name}_another:1\" }\n let(:third_experiment_version) { \"variation_of_#{experiment.name}:1\" }\n let(:user_keys) do\n {\n experiment_version => \"blue\",\n second_experiment_version => \"red\",\n third_experiment_version => \"yellow\"\n }\n end\n\n before(:each) { @subject.cleanup_old_versions!(experiment) }\n\n it \"removes key if old experiment is found\" do\n expect(@subject.keys).not_to include(experiment_version)\n end\n\n it \"does not remove other keys\" do\n expect(@subject.keys).to include(second_experiment_version, third_experiment_version)\n end\n end\n\n context \"#cleanup_old_experiments!\" do\n it \"removes key if experiment is not found\" do\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n it \"removes key if experiment has a winner\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(experiment).to receive(:start_time).and_return(Date.today)\n allow(experiment).to receive(:has_winner?).and_return(true)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n it \"removes key if experiment has not started yet\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(experiment).to receive(:has_winner?).and_return(false)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n context \"with finished key\" do\n let(:user_keys) { { \"link_color\" => \"blue\", \"link_color:finished\" => true } }\n\n it \"does not remove finished key for experiment without a winner\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(Split::ExperimentCatalog).to 
receive(:find).with(\"link_color:finished\").and_return(nil)\n allow(experiment).to receive(:start_time).and_return(Date.today)\n allow(experiment).to receive(:has_winner?).and_return(false)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to include(\"link_color\")\n expect(@subject.keys).to include(\"link_color:finished\")\n end\n end\n\n context \"when already cleaned up\" do\n before do\n @subject.cleanup_old_experiments!\n end\n\n it \"does not clean up again\" do\n expect(@subject).to_not receive(:keys_without_finished)\n @subject.cleanup_old_experiments!\n end\n end\n end\n\n context \"allows user to be loaded from adapter\" do\n it \"loads user from adapter (RedisAdapter)\" do\n user = Split::Persistence::RedisAdapter.new(nil, 112233)\n user[\"foo\"] = \"bar\"\n\n ab_user = Split::User.find(112233, :redis)\n\n expect(ab_user[\"foo\"]).to eql(\"bar\")\n end\n\n it \"returns nil if adapter does not implement a finder method\" do\n ab_user = Split::User.find(112233, :dual_adapter)\n expect(ab_user).to be_nil\n end\n end\n\n context \"instantiated with custom adapter\" do\n let(:custom_adapter) { double(:persistence_adapter) }\n\n before do\n @subject = described_class.new(context, custom_adapter)\n end\n\n it \"sets user to the custom adapter\" do\n expect(@subject.user).to eq(custom_adapter)\n end\n end\nend\n\n Merge pull request #678 from splitrb/fix-hash-syntax-offenses\n\nFix Style/HashSyntax offenses\n @@ -5,7 +5,7 @@ require 'split/user'\n \n describe Split::User do\n let(:user_keys) { { 'link_color' => 'blue' } }\n- let(:context) { double(:session => { split: user_keys }) }\n+ let(:context) { double(session: { split: user_keys }) }\n let(:experiment) { Split::Experiment.new('link_color') }\n \n before(:each) do\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #678 from 
splitrb/fix-hash-syntax-offenses"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675188,"cells":{"id":{"kind":"string","value":"10070838"},"text":{"kind":"string","value":" user_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\nrequire \"split/experiment_catalog\"\n\ndescribe Split::User do\n let(:user_keys) { { 'link_color' => 'blue' } }\n let(:context) { double(:session => { split: user_keys }) }\n let(:experiment) { Split::Experiment.new('link_color') }\n\n before(:each) do\n\n before(:each) do\n @subject = described_class.new(context)\n end\n\n it \"delegates methods correctly\" do\n expect(@subject[\"link_color\"]).to eq(@subject.user[\"link_color\"])\n end\n\n context \"#cleanup_old_versions!\" do\n let(:experiment_version) { \"#{experiment.name}:1\" }\n let(:second_experiment_version) { \"#{experiment.name}_another:1\" }\n let(:third_experiment_version) { \"variation_of_#{experiment.name}:1\" }\n let(:user_keys) do\n {\n experiment_version => \"blue\",\n second_experiment_version => \"red\",\n third_experiment_version => \"yellow\"\n }\n end\n\n before(:each) { @subject.cleanup_old_versions!(experiment) }\n\n it \"removes key if old experiment is found\" do\n expect(@subject.keys).not_to include(experiment_version)\n end\n\n it \"does not remove other keys\" do\n expect(@subject.keys).to include(second_experiment_version, third_experiment_version)\n end\n end\n\n context \"#cleanup_old_experiments!\" do\n it \"removes key if experiment is not found\" do\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n it \"removes key if experiment has a winner\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(experiment).to 
receive(:start_time).and_return(Date.today)\n allow(experiment).to receive(:has_winner?).and_return(true)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n it \"removes key if experiment has not started yet\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(experiment).to receive(:has_winner?).and_return(false)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to be_empty\n end\n\n context \"with finished key\" do\n let(:user_keys) { { \"link_color\" => \"blue\", \"link_color:finished\" => true } }\n\n it \"does not remove finished key for experiment without a winner\" do\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color\").and_return(experiment)\n allow(Split::ExperimentCatalog).to receive(:find).with(\"link_color:finished\").and_return(nil)\n allow(experiment).to receive(:start_time).and_return(Date.today)\n allow(experiment).to receive(:has_winner?).and_return(false)\n @subject.cleanup_old_experiments!\n expect(@subject.keys).to include(\"link_color\")\n expect(@subject.keys).to include(\"link_color:finished\")\n end\n end\n\n context \"when already cleaned up\" do\n before do\n @subject.cleanup_old_experiments!\n end\n\n it \"does not clean up again\" do\n expect(@subject).to_not receive(:keys_without_finished)\n @subject.cleanup_old_experiments!\n end\n end\n end\n\n context \"allows user to be loaded from adapter\" do\n it \"loads user from adapter (RedisAdapter)\" do\n user = Split::Persistence::RedisAdapter.new(nil, 112233)\n user[\"foo\"] = \"bar\"\n\n ab_user = Split::User.find(112233, :redis)\n\n expect(ab_user[\"foo\"]).to eql(\"bar\")\n end\n\n it \"returns nil if adapter does not implement a finder method\" do\n ab_user = Split::User.find(112233, :dual_adapter)\n expect(ab_user).to be_nil\n end\n end\n\n context \"instantiated with custom adapter\" do\n let(:custom_adapter) { double(:persistence_adapter) }\n\n before do\n @subject = 
described_class.new(context, custom_adapter)\n end\n\n it \"sets user to the custom adapter\" do\n expect(@subject.user).to eq(custom_adapter)\n end\n end\nend\n\n Merge pull request #678 from splitrb/fix-hash-syntax-offenses\n\nFix Style/HashSyntax offenses\n @@ -5,7 +5,7 @@ require 'split/user'\n \n describe Split::User do\n let(:user_keys) { { 'link_color' => 'blue' } }\n- let(:context) { double(:session => { split: user_keys }) }\n+ let(:context) { double(session: { split: user_keys }) }\n let(:experiment) { Split::Experiment.new('link_color') }\n \n before(:each) do\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #678 from splitrb/fix-hash-syntax-offenses"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675189,"cells":{"id":{"kind":"string","value":"10070839"},"text":{"kind":"string","value":" helper_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\n\n# TODO change some of these tests to use Rack::Test\n\ndescribe Split::Helper do\n include Split::Helper\n\n let(:experiment) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\")\n }\n\n describe \"ab_test\" do\n it \"should not raise an error when passed strings for alternatives\" do\n expect { ab_test(\"xyz\", \"1\", \"2\", \"3\") }.not_to raise_error\n end\n\n it \"should not raise an error when passed an array for alternatives\" do\n expect { ab_test(\"xyz\", [\"1\", \"2\", \"3\"]) }.not_to raise_error\n end\n\n it \"should raise the appropriate error when passed integers for alternatives\" do\n expect { ab_test(\"xyz\", 1, 2, 3) }.to raise_error(ArgumentError)\n end\n\n it \"should raise the appropriate error when passed symbols for alternatives\" do\n expect { ab_test(\"xyz\", :a, :b, 
:c) }.to raise_error(ArgumentError)
    end

    it "should not raise error when passed an array for goals" do
      expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error
    end

    it "should not raise error when passed just one goal" do
      expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error
    end

    # Combined experiments are not consumable via ab_test; configuring one must raise.
    it "raises an appropriate error when processing combined expirements" do
      Split.configuration.experiments = {
        combined_exp_1: {
          alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ],
          metric: :my_metric,
          combined_experiments: [:combined_exp_1_sub_1]
        }
      }
      Split::ExperimentCatalog.find_or_create("combined_exp_1")
      expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError)
    end

    it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do
      ab_test("link_color", "blue", "red")
      expect(["red", "blue"]).to include(ab_user["link_color"])
    end

    it "should increment the participation counter after assignment to a new user" do
      previous_red_count = Split::Alternative.new("red", "link_color").participant_count
      previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count

      ab_test("link_color", "blue", "red")

      new_red_count = Split::Alternative.new("red", "link_color").participant_count
      new_blue_count = Split::Alternative.new("blue", "link_color").participant_count

      expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)
    end

    it "should not increment the counter for an experiment that the user is not participating in" do
      ab_test("link_color", "blue", "red")
      e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
      expect {
        # User shouldn't participate in this second experiment
        ab_test("button_size", "small", "big")
      }.not_to change { e.participant_count }
    end

    it "should not increment the counter for an ended experiment" do
      e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
      e.winner = "small"
      expect {
        a = ab_test("button_size", "small", "big")
        expect(a).to eq("small")
      }.not_to change { e.participant_count }
    end

    it "should not increment the counter for an not started experiment" do
      expect(Split.configuration).to receive(:start_manually).and_return(true)
      e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
      expect {
        a = ab_test("button_size", "small", "big")
        expect(a).to eq("small")
      }.not_to change { e.participant_count }
    end

    it "should return the given alternative for an existing user" do
      expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red")
    end

    it "should always return the winner if one is present" do
      experiment.winner = "orange"

      expect(ab_test("link_color", "blue", "red")).to eq("orange")
    end

    # Overrides arrive through query params shaped as ab_test[<experiment>]=<alternative>.
    it "should allow the alternative to be forced by passing it in the params" do
      # ?ab_test[link_color]=blue
      @params = { "ab_test" => { "link_color" => "blue" } }

      alternative = ab_test("link_color", "blue", "red")
      expect(alternative).to eq("blue")

      alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
      expect(alternative).to eq("blue")

      @params = { "ab_test" => { "link_color" => "red" } }

      alternative = ab_test("link_color", "blue", "red")
      expect(alternative).to eq("red")

      alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1)
      expect(alternative).to eq("red")
    end

    it "should not allow an arbitrary alternative" do
      @params = { "ab_test" => { "link_color" => "pink" } }
      alternative = ab_test("link_color", "blue")
      expect(alternative).to eq("blue")
    end

    it "should not store the split when a param forced alternative" do
      @params = { "ab_test" => { "link_color" => "blue" } }
      expect(ab_user).not_to receive(:[]=)
      ab_test("link_color", "blue", "red")
    end

    it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do
      @params = { "SPLIT_DISABLE" => "true" }
      alternative = ab_test("link_color", "blue", "red")
      expect(alternative).to eq("blue")
      alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5)
      expect(alternative).to eq("blue")
      alternative = ab_test("link_color", "red", "blue")
      expect(alternative).to eq("red")
      alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1)
      expect(alternative).to eq("red")
    end

    it "should not store the split when Split generically disabled" do
      @params = { "SPLIT_DISABLE" => "true" }
      expect(ab_user).not_to receive(:[]=)
      ab_test("link_color", "blue", "red")
    end

    # store_override persists a param-forced alternative instead of ignoring it.
    context "when store_override is set" do
      before { Split.configuration.store_override = true }

      it "should store the forced alternative" do
        @params = { "ab_test" => { "link_color" => "blue" } }
        expect(ab_user).to receive(:[]=).with("link_color", "blue")
        ab_test("link_color", "blue", "red")
      end
    end

    context "when on_trial_choose is set" do
      before { Split.configuration.on_trial_choose = :some_method }
      it "should call the method" do
        expect(self).to receive(:some_method)
        ab_test("link_color", "blue", "red")
      end
    end

    it "should allow passing a block" do
      alt = ab_test("link_color", "blue", "red")
      ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" }
      expect(ret).to eq("shared/#{alt}")
    end

    it "should allow the share of visitors see an alternative to be specified" do
      ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })
      expect(["red", "blue"]).to include(ab_user["link_color"])
    end

    it "should allow alternative weighting interface as a single hash" do
      ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)
      experiment = Split::ExperimentCatalog.find("link_color")
      expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"])
      expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])
    end

    it "should only let a user participate in one experiment at a time" do
      link_color = ab_test("link_color", "blue", "red")
      ab_test("button_size", "small", "big")
      expect(ab_user["link_color"]).to eq(link_color)
      big = Split::Alternative.new("big", "button_size")
      expect(big.participant_count).to eq(0)
      small = Split::Alternative.new("small", "button_size")
      expect(small.participant_count).to eq(0)
    end

    it "should let a user participate in many experiment with allow_multiple_experiments option" do
      Split.configure do |config|
        config.allow_multiple_experiments = true
      end
      link_color = ab_test("link_color", "blue", "red")
      button_size = ab_test("button_size", "small", "big")
      expect(ab_user["link_color"]).to eq(link_color)
      expect(ab_user["button_size"]).to eq(button_size)
      button_size_alt = Split::Alternative.new(button_size, "button_size")
      expect(button_size_alt.participant_count).to eq(1)
    end

    # In "control" mode a user may join many experiments but hold at most one
    # non-control alternative across all of them.
    context "with allow_multiple_experiments = 'control'" do
      it "should let a user participate in many experiment with one non-'control' alternative" do
        Split.configure do |config|
          config.allow_multiple_experiments = "control"
        end
        groups = 100.times.map do |n|
          ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n })
        end

        experiments = ab_user.active_experiments
        expect(experiments.size).to be > 1

        count_control = experiments.values.count { |g| g == "control" }
        expect(count_control).to eq(experiments.size - 1)

        count_alts = groups.count { |g| g != "control" }
        expect(count_alts).to eq(1)
      end

      context "when user already has experiment" do
        let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) }

        before do
          Split.configure do |config|
            config.allow_multiple_experiments = "control"
          end

          Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save
          Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save
        end

        # Heavily skewed weights make a re-bucketing (rather than a restore)
        # overwhelmingly likely to flip the assertion, exposing regressions.
        it "should restore previously selected alternative" do
          expect(ab_user.active_experiments.size).to eq 1
          expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
          expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt"
        end

        it "should select the correct alternatives after experiment resets" do
          experiment = Split::ExperimentCatalog.find(:test_0)
          experiment.reset
          mock_user[experiment.key] = "test-alt"

          expect(ab_user.active_experiments.size).to eq 1
          expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
          expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt"
        end

        it "lets override existing choice" do
          pending "this requires user store reset on first call not depending on whelther it is current trial"
          @params = { "ab_test" => { "test_1" => "test-alt" } }

          expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control"
          expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt"
        end
      end
    end

    # NOTE(review): `ab_user` below is a plain local Hash that shadows the
    # helper of the same name; the assertion checks the local is left untouched.
    # Presumably the shadowing is intentional — confirm against helper internals.
    it "should not over-write a finished key when an experiment is on a later version" do
      experiment.increment_version
      ab_user = { experiment.key => "blue", experiment.finished_key => true }
      finished_session = ab_user.dup
      ab_test("link_color", "blue", "red")
      expect(ab_user).to eq(finished_session)
    end
  end

  # Per-experiment metadata is yielded as the second argument of the ab_test block.
  describe "metadata" do
    context "is defined" do
      before do
        Split.configuration.experiments = {
          my_experiment: {
            alternatives: ["one", "two"],
            resettable: false,
            metadata: { "one" => "Meta1", "two" => "Meta2" }
          }
        }
      end

      it "should be passed to helper block" do
        @params = { "ab_test" => { "my_experiment" => "two" } }
        expect(ab_test("my_experiment")).to eq "two"
        expect(ab_test("my_experiment") do |alternative, meta|
          meta
        end).to eq("Meta2")
      end

      it "should pass control metadata helper block if library disabled" do
        Split.configure do |config|
          config.enabled = false
        end

        expect(ab_test("my_experiment")).to eq "one"
        expect(ab_test("my_experiment") do |_, meta|
          meta
        end).to eq("Meta1")
      end
    end

    context "is not defined" do
      before do
        Split.configuration.experiments = {
          my_experiment: {
            alternatives: ["one", "two"],
            resettable: false,
            metadata: nil
          }
        }
      end

      it "should be passed to helper block" do
        expect(ab_test("my_experiment") do |alternative, meta|
          meta
        end).to eq({})
      end

      it "should pass control metadata helper block if library disabled" do
        Split.configure do |config|
          config.enabled = false
        end

        expect(ab_test("my_experiment") do |_, meta|
          meta
        end).to eq({})
      end
    end
  end

  # ab_finished: completion counting and session-reset behavior.
  describe "ab_finished" do
    context "for an experiment that the user participates in" do
      before(:each) do
        @experiment_name = "link_color"
        @alternatives = ["blue", "red"]
        @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)
        @alternative_name = ab_test(@experiment_name, *@alternatives)
        @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
      end

      it "should increment the counter for the completed alternative" do
        ab_finished(@experiment_name)
        new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
        expect(new_completion_count).to eq(@previous_completion_count + 1)
      end

      it "should set experiment's finished key if reset is false" do
        ab_finished(@experiment_name, { reset: false })
        expect(ab_user[@experiment.key]).to eq(@alternative_name)
        expect(ab_user[@experiment.finished_key]).to eq(true)
      end

      it "should not increment the counter if reset is false and the experiment has been already finished" do
        2.times { ab_finished(@experiment_name, { reset: false }) }
        new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count
        expect(new_completion_count).to eq(@previous_completion_count + 1)
      end

      it "should not increment the counter for an ended experiment" do
        e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big")
        e.winner = "small"
        a = ab_test("button_size", "small", "big")
        expect(a).to eq("small")
        expect {
          ab_finished("button_size")
        }.not_to change { Split::Alternative.new(a, "button_size").completed_count }
      end

      it "should clear out the user's participation from their session" do
        expect(ab_user[@experiment.key]).to eq(@alternative_name)
        ab_finished(@experiment_name)
        expect(ab_user.keys).to be_empty
      end

      it "should not clear out the users session if reset is false" do
        expect(ab_user[@experiment.key]).to eq(@alternative_name)
        ab_finished(@experiment_name, { reset: false })
        expect(ab_user[@experiment.key]).to eq(@alternative_name)
        expect(ab_user[@experiment.finished_key]).to eq(true)
      end

      it "should reset the users session when experiment is not versioned" do
        expect(ab_user[@experiment.key]).to eq(@alternative_name)
        ab_finished(@experiment_name)
        expect(ab_user.keys).to be_empty
      end

      it "should reset the users session when experiment is versioned" do
        @experiment.increment_version
        @alternative_name = ab_test(@experiment_name, *@alternatives)

        
expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n context \"when on_trial_complete is set\" do\n before { Split.configuration.on_trial_complete = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_finished(@experiment_name)\n \"shared/#{alternative}\"\n end.should eq('shared/blue')\n end\n end\n\n describe 'finished' do\n end\n\n context \"for an experiment that the user does not participate in\" do\n before do\n Split::ExperimentCatalog.find_or_create(:not_started_experiment, \"control\", \"alt\")\n end\n it \"should not raise an exception\" do\n expect { ab_finished(:not_started_experiment) }.not_to raise_exception\n end\n\n it \"should not change the user state when reset is false\" do\n expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])\n end\n\n it \"should not change the user state when reset is true\" do\n expect(self).not_to receive(:reset!)\n ab_finished(:not_started_experiment)\n end\n\n it \"should not increment the completed counter\" do\n ab_finished(:not_started_experiment)\n expect(Split::Alternative.new(\"control\", :not_started_experiment).completed_count).to eq(0)\n expect(Split::Alternative.new(\"alt\", :not_started_experiment).completed_count).to eq(0)\n end\n end\n end\n\n context \"finished with config\" do\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n }\n }\n alternative = ab_test(:my_experiment)\n experiment = Split::ExperimentCatalog.find :my_experiment\n\n ab_finished :my_experiment\n expect(ab_user[experiment.key]).to eq(alternative)\n expect(ab_user[experiment.finished_key]).to eq(true)\n end\n end\n\n context \"finished with metric name\" do\n before { Split.configuration.experiments = {} }\n before { expect(Split::Alternative).to 
receive(:new).at_least(1).times.and_call_original }\n\n def should_finish_experiment(experiment_name, should_finish = true)\n alts = Split.configuration.experiments[experiment_name][:alternatives]\n experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)\n alt_name = ab_user[experiment.key] = alts.first\n alt = double(\"alternative\")\n expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)\n expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)\n if should_finish\n expect(alt).to receive(:increment_completion).at_most(1).times\n else\n expect(alt).not_to receive(:increment_completion)\n end\n end\n\n it \"completes the test\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n metric: :my_metric\n }\n should_finish_experiment :my_experiment\n ab_finished :my_metric\n end\n\n it \"completes all relevant tests\" do\n Split.configuration.experiments = {\n exp_1: {\n alternatives: [ \"1-1\", \"1-2\" ],\n metric: :my_metric\n },\n exp_2: {\n alternatives: [ \"2-1\", \"2-2\" ],\n metric: :another_metric\n },\n exp_3: {\n alternatives: [ \"3-1\", \"3-2\" ],\n metric: :my_metric\n },\n }\n should_finish_experiment :exp_1\n should_finish_experiment :exp_2, false\n should_finish_experiment :exp_3\n ab_finished :my_metric\n end\n\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n resettable: false,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n\n it \"passes through options\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = 
Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric, reset: false\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n end\n\n describe \"conversions\" do\n it \"should return a conversion rate for an alternative\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(previous_convertion_rate).to eq(0.0)\n\n ab_finished(\"link_color\")\n\n new_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(new_convertion_rate).to eq(1.0)\n end\n end\n\n describe \"active experiments\" do\n it \"should show an active test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show a finished test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n ab_finished(\"def\", { reset: false })\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show an active test when an experiment is on a later version\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"link_color\"\n end\n\n it \"should show versioned tests properly\" do\n 10.times { experiment.reset }\n\n alternative = ab_test(experiment.name, \"blue\", \"red\")\n ab_finished(experiment.name, reset: false)\n\n expect(experiment.version).to eq(10)\n expect(active_experiments.count).to eq 1\n expect(active_experiments).to eq({ \"link_color\" => alternative })\n end\n\n it \"should show multiple tests\" do\n Split.configure do |config|\n 
config.allow_multiple_experiments = true\n end\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 2\n expect(active_experiments[\"def\"]).to eq alternative\n expect(active_experiments[\"ghi\"]).to eq another_alternative\n end\n\n it \"should not show tests with winners\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n e = Split::ExperimentCatalog.find_or_create(\"def\", \"4\", \"5\", \"6\")\n e.winner = \"4\"\n ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"ghi\"\n expect(active_experiments.first[1]).to eq another_alternative\n end\n end\n\n describe \"when user is a robot\" do\n before(:each) do\n @request = OpenStruct.new(user_agent: \"Googlebot/2.1 (+http://www.google.com/bot.html)\")\n end\n\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not create a experiment\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Experiment.new(\"link_color\")).to be_a_new_record\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = 
ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when providing custom ignore logic\" do\n context \"using a proc to configure custom logic\" do\n before(:each) do\n Split.configure do |c|\n c.ignore_filter = proc { |request| true } # ignore everything\n end\n end\n\n it \"ignores the ab_test\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n\n red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n expect((red_count + blue_count)).to be(0)\n end\n end\n end\n\n shared_examples_for \"a disabled test\" do\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count 
= Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when ip address is ignored\" do\n context \"individually\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.130\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"for a range\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.129\")\n Split.configure do |c|\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"using both a range and a specific value\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.128\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"when ignored other address\" do\n before do\n @request = OpenStruct.new(ip: \"1.1.1.1\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it \"works as usual\" do\n alternative_name = ab_test(\"link_color\", \"red\", \"blue\")\n expect {\n ab_finished(\"link_color\")\n }.to change(Split::Alternative.new(alternative_name, \"link_color\"), :completed_count).by(1)\n end\n end\n end\n\n describe \"when user is previewing\" do\n before(:each) do\n @request = OpenStruct.new(headers: { \"x-purpose\" => \"preview\" })\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n describe \"versioned experiments\" do\n it \"should use version zero if no version is present\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(experiment.version).to eq(0)\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n end\n\n it \"should save the version of the experiment to the session\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n 
alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n end\n\n it \"should load the experiment even if the version is not 0\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n return_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(return_alternative_name).to eq(alternative_name)\n end\n\n it \"should reset the session of a user on an older version of the experiment\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n new_alternative = Split::Alternative.new(new_alternative_name, \"link_color\")\n expect(new_alternative.participant_count).to eq(1)\n end\n\n it \"should cleanup old versions of experiments from the session\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n end\n\n it \"should only count completion of users on the 
current version\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n Split::Alternative.new(alternative_name, \"link_color\")\n\n experiment.reset\n expect(experiment.version).to eq(1)\n\n ab_finished(\"link_color\")\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.completed_count).to eq(0)\n end\n end\n\n context \"when redis is not available\" do\n before(:each) do\n expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)\n end\n\n context \"and db_failover config option is turned off\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = false\n end\n end\n\n describe \"ab_test\" do\n it \"should raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"finished\" do\n it \"should raise an exception\" do\n expect { ab_finished(\"link_color\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"disable split testing\" do\n before(:each) do\n Split.configure do |config|\n config.enabled = false\n end\n end\n\n it \"should not attempt to connect to redis\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should return control variable\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n end\n end\n\n context \"and db_failover config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = true\n end\n end\n\n describe \"ab_test\" do\n it \"should not raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to 
be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always use first alternative\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"blue\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/blue\")\n end\n\n context \"and db_failover_allow_parameter_override config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover_allow_parameter_override = true\n end\n end\n\n context \"and given an override parameter\" do\n it \"should use given override instead of the first alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\", \"green\")).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/red\")\n end\n end\n end\n\n context \"and preloaded config given\" do\n before do\n Split.configuration.experiments[:link_color] = {\n alternatives: [ \"blue\", \"red\" ],\n }\n end\n\n it \"uses first alternative\" do\n expect(ab_test(:link_color)).to eq(\"blue\")\n end\n end\n end\n\n describe \"finished\" do\n it \"should not raise an exception\" do\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n 
config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_finished(\"link_color\")\n end\n end\n end\n end\n\n context \"with preloaded config\" do\n before { Split.configuration.experiments = {} }\n\n it \"pulls options from config file\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n ab_test :my_experiment\n expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(Split::Experiment.new(:my_experiment).goals).to eq([ \"goal1\", \"goal2\" ])\n end\n\n it \"can be called multiple times\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n 5.times { ab_test :my_experiment }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\" ])\n expect(experiment.participant_count).to eq(1)\n end\n\n it \"accepts multiple goals\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [ \"goal1\", \"goal2\", \"goal3\" ]\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\", \"goal3\" ])\n end\n\n it \"allow specifying goals to be optional\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ]\n }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([])\n end\n\n it \"accepts multiple alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"second_opt\", 
\"third_opt\" ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"second_opt\", \"third_opt\" ])\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([[\"control_opt\", 0.67], [\"second_opt\", 0.1], [\"third_opt\", 0.23]])\n end\n\n it \"accepts probability on some alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.34], [\"second_opt\", 0.215], [\"third_opt\", 0.23], [\"fourth_opt\", 0.215]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"allows name param without probability\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\" },\n \"second_opt\",\n { name: \"third_opt\", percent: 64 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.18], [\"second_opt\", 0.18], [\"third_opt\", 0.64]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"fails gracefully if config is missing experiment\" do\n Split.configuration.experiments = { 
other_experiment: { foo: \"Bar\" } }\n expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)\n end\n\n it \"fails gracefully if config is missing\" do\n expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"fails gracefully if config is missing alternatives\" do\n Split.configuration.experiments[:my_experiment] = { foo: \"Bar\" }\n expect { ab_test :my_experiment }.to raise_error(NoMethodError)\n end\n end\n\n it \"should handle multiple experiments correctly\" do\n experiment2 = Split::ExperimentCatalog.find_or_create(\"link_color2\", \"blue\", \"red\")\n ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"link_color2\", \"blue\", \"red\")\n ab_finished(\"link_color2\")\n\n experiment2.alternatives.each do |alt|\n expect(alt.unfinished_count).to eq(0)\n end\n end\n\n context \"with goals\" do\n before do\n @experiment = { \"link_color\" => [\"purchase\", \"refund\"] }\n @alternatives = [\"blue\", \"red\"]\n @experiment_name, @goals = normalize_metric(@experiment)\n @goal1 = @goals[0]\n @goal2 = @goals[1]\n end\n\n it \"should normalize experiment\" do\n expect(@experiment_name).to eq(\"link_color\")\n expect(@goals).to eq([\"purchase\", \"refund\"])\n end\n\n describe \"ab_test\" do\n it \"should allow experiment goals interface as a single hash\" do\n ab_test(@experiment, *@alternatives)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n describe \"ab_finished\" do\n before do\n @alternative_name = ab_test(@experiment, *@alternatives)\n end\n\n it \"should increment the counter for the specified-goal completed alternative\" do\n expect { ab_finished({ \"link_color\" => [\"purchase\"] }) }\n .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)\n .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) 
}.by(1)\n end\n end\n end\nend\n\n Allow parameter overrides, even without Redis.\n\nWith this option, applications can be tested to ensure that the options\nrender correctly, even without Redis installed and running on the\ndevelopment machines.\n\n @@ -410,6 +410,27 @@ describe Split::Helper do\n \"shared/#{alternative}\"\n end.should eq('shared/blue')\n end\n+\n+ context 'and db_failover_allow_parameter_override config option is turned on' do\n+ before(:each) do\n+ Split.configure do |config|\n+ config.db_failover_allow_parameter_override = true\n+ end\n+ end\n+\n+ context 'and given an override parameter' do\n+ it 'should use given override instead of the first alternative' do\n+ @params = {'link_color' => 'red'}\n+ ab_test('link_color', 'blue', 'red').should eq('red')\n+ ab_test('link_color', 'blue', 'red', 'green').should eq('red')\n+ ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2).should eq('red')\n+ ab_test('link_color', {'blue' => 0.8}, {'red' => 20}).should eq('red')\n+ ab_test('link_color', 'blue', 'red') do |alternative|\n+ \"shared/#{alternative}\"\n+ end.should eq('shared/red')\n+ end\n+ end\n+ end\n end\n \n describe 'finished' do\n"},"addition_count":{"kind":"number","value":21,"string":"21"},"commit_subject":{"kind":"string","value":"Allow parameter overrides, even without Redis."},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675190,"cells":{"id":{"kind":"string","value":"10070840"},"text":{"kind":"string","value":" helper_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\n\n# TODO change some of these tests to use Rack::Test\n\ndescribe Split::Helper do\n include Split::Helper\n\n let(:experiment) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\")\n }\n\n describe \"ab_test\" do\n it 
\"should not raise an error when passed strings for alternatives\" do\n expect { ab_test(\"xyz\", \"1\", \"2\", \"3\") }.not_to raise_error\n end\n\n it \"should not raise an error when passed an array for alternatives\" do\n expect { ab_test(\"xyz\", [\"1\", \"2\", \"3\"]) }.not_to raise_error\n end\n\n it \"should raise the appropriate error when passed integers for alternatives\" do\n expect { ab_test(\"xyz\", 1, 2, 3) }.to raise_error(ArgumentError)\n end\n\n it \"should raise the appropriate error when passed symbols for alternatives\" do\n expect { ab_test(\"xyz\", :a, :b, :c) }.to raise_error(ArgumentError)\n end\n\n it \"should not raise error when passed an array for goals\" do\n expect { ab_test({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should not raise error when passed just one goal\" do\n expect { ab_test({ \"link_color\" => \"purchase\" }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"raises an appropriate error when processing combined expirements\" do\n Split.configuration.experiments = {\n combined_exp_1: {\n alternatives: [ { name: \"control\", percent: 50 }, { name: \"test-alt\", percent: 50 } ],\n metric: :my_metric,\n combined_experiments: [:combined_exp_1_sub_1]\n }\n }\n Split::ExperimentCatalog.find_or_create(\"combined_exp_1\")\n expect { ab_test(\"combined_exp_1\") }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"should assign a random alternative to a new user when there are an equal number of alternatives assigned\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should increment the participation counter after assignment to a new user\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n 
new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)\n end\n\n it \"should not increment the counter for an experiment that the user is not participating in\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n # User shouldn't participate in this second experiment\n ab_test(\"button_size\", \"small\", \"big\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an not started experiment\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should return the given alternative for an existing user\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always return the winner if one is present\" do\n experiment.winner = \"orange\"\n\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"orange\")\n end\n\n it \"should allow the alternative to be forced by passing it in the params\" do\n # ?ab_test[link_color]=blue\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n\n 
alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"red\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 5 }, \"red\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not allow an arbitrary alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"pink\" } }\n alternative = ab_test(\"link_color\", \"blue\")\n expect(alternative).to eq(\"blue\")\n end\n\n it \"should not store the split when a param forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"SPLIT_DISABLE query parameter should also force the alternative (uses control)\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", \"red\", \"blue\")\n expect(alternative).to eq(\"red\")\n alternative = ab_test(\"link_color\", { \"red\" => 5 }, \"blue\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not store the split when Split generically disabled\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n context \"when store_override is set\" do\n before { Split.configuration.store_override = true }\n\n it \"should store the forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).to receive(:[]=).with(\"link_color\", \"blue\")\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n context \"when on_trial_choose is set\" do\n before { 
Split.configuration.on_trial_choose = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n it \"should allow passing a block\" do\n alt = ab_test(\"link_color\", \"blue\", \"red\")\n ret = ab_test(\"link_color\", \"blue\", \"red\") { |alternative| \"shared/#{alternative}\" }\n expect(ret).to eq(\"shared/#{alt}\")\n end\n\n it \"should allow the share of visitors see an alternative to be specified\" do\n ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should allow alternative weighting interface as a single hash\" do\n ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.alternatives.map(&:name)).to eq([\"blue\", \"red\"])\n expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])\n end\n\n it \"should only let a user participate in one experiment at a time\" do\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n big = Split::Alternative.new(\"big\", \"button_size\")\n expect(big.participant_count).to eq(0)\n small = Split::Alternative.new(\"small\", \"button_size\")\n expect(small.participant_count).to eq(0)\n end\n\n it \"should let a user participate in many experiment with allow_multiple_experiments option\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n button_size = ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n expect(ab_user[\"button_size\"]).to eq(button_size)\n button_size_alt = Split::Alternative.new(button_size, \"button_size\")\n expect(button_size_alt.participant_count).to eq(1)\n end\n\n context 
\"with allow_multiple_experiments = 'control'\" do\n it \"should let a user participate in many experiment with one non-'control' alternative\" do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n groups = 100.times.map do |n|\n ab_test(\"test#{n}\".to_sym, { \"control\" => (100 - n) }, { \"test#{n}-alt\" => n })\n end\n\n experiments = ab_user.active_experiments\n expect(experiments.size).to be > 1\n\n count_control = experiments.values.count { |g| g == \"control\" }\n expect(count_control).to eq(experiments.size - 1)\n\n count_alts = groups.count { |g| g != \"control\" }\n expect(count_alts).to eq(1)\n end\n\n context \"when user already has experiment\" do\n let(:mock_user) { Split::User.new(self, { \"test_0\" => \"test-alt\" }) }\n\n before do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n\n Split::ExperimentCatalog.find_or_initialize(\"test_0\", \"control\", \"test-alt\").save\n Split::ExperimentCatalog.find_or_initialize(\"test_1\", \"control\", \"test-alt\").save\n end\n\n it \"should restore previously selected alternative\" do\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 1 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"should select the correct alternatives after experiment resets\" do\n experiment = Split::ExperimentCatalog.find(:test_0)\n experiment.reset\n mock_user[experiment.key] = \"test-alt\"\n\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"lets override existing choice\" do\n pending \"this requires user store reset on first call not depending on whelther it is current trial\"\n @params = { \"ab_test\" => { 
\"test_1\" => \"test-alt\" } }\n\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"control\"\n expect(ab_test(:test_1, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n end\n end\n end\n\n it \"should not over-write a finished key when an experiment is on a later version\" do\n experiment.increment_version\n ab_user = { experiment.key => \"blue\", experiment.finished_key => true }\n finished_session = ab_user.dup\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user).to eq(finished_session)\n end\n end\n\n describe \"metadata\" do\n context \"is defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: { \"one\" => \"Meta1\", \"two\" => \"Meta2\" }\n }\n }\n end\n\n it \"should be passed to helper block\" do\n @params = { \"ab_test\" => { \"my_experiment\" => \"two\" } }\n expect(ab_test(\"my_experiment\")).to eq \"two\"\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq(\"Meta2\")\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\")).to eq \"one\"\n expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq(\"Meta1\")\n end\n end\n\n context \"is not defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: nil\n }\n }\n end\n\n it \"should be passed to helper block\" do\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq({})\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq({})\n end\n end\n end\n\n describe \"ab_finished\" do\n context \"for an experiment that the user 
participates in\" do\n before(:each) do\n @experiment_name = \"link_color\"\n @alternatives = [\"blue\", \"red\"]\n @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n end\n\n it \"should increment the counter for the completed alternative\" do\n ab_finished(@experiment_name)\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should set experiment's finished key if reset is false\" do\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should not increment the counter if reset is false and the experiment has been already finished\" do\n 2.times { ab_finished(@experiment_name, { reset: false }) }\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(a, \"button_size\").completed_count }\n end\n\n it \"should clear out the user's participation from their session\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should not clear out the users session if reset is false\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n 
ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should reset the users session when experiment is not versioned\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should reset the users session when experiment is versioned\" do\n @experiment.increment_version\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n context \"when on_trial_complete is set\" do\n before { Split.configuration.on_trial_complete = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_finished(@experiment_name)\n \"shared/#{alternative}\"\n end.should eq('shared/blue')\n end\n end\n\n describe 'finished' do\n end\n\n context \"for an experiment that the user does not participate in\" do\n before do\n Split::ExperimentCatalog.find_or_create(:not_started_experiment, \"control\", \"alt\")\n end\n it \"should not raise an exception\" do\n expect { ab_finished(:not_started_experiment) }.not_to raise_exception\n end\n\n it \"should not change the user state when reset is false\" do\n expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])\n end\n\n it \"should not change the user state when reset is true\" do\n expect(self).not_to receive(:reset!)\n ab_finished(:not_started_experiment)\n end\n\n it \"should not increment the completed counter\" do\n ab_finished(:not_started_experiment)\n expect(Split::Alternative.new(\"control\", :not_started_experiment).completed_count).to eq(0)\n expect(Split::Alternative.new(\"alt\", :not_started_experiment).completed_count).to eq(0)\n end\n end\n end\n\n context \"finished with config\" do\n it \"passes 
reset option\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n }\n }\n alternative = ab_test(:my_experiment)\n experiment = Split::ExperimentCatalog.find :my_experiment\n\n ab_finished :my_experiment\n expect(ab_user[experiment.key]).to eq(alternative)\n expect(ab_user[experiment.finished_key]).to eq(true)\n end\n end\n\n context \"finished with metric name\" do\n before { Split.configuration.experiments = {} }\n before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }\n\n def should_finish_experiment(experiment_name, should_finish = true)\n alts = Split.configuration.experiments[experiment_name][:alternatives]\n experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)\n alt_name = ab_user[experiment.key] = alts.first\n alt = double(\"alternative\")\n expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)\n expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)\n if should_finish\n expect(alt).to receive(:increment_completion).at_most(1).times\n else\n expect(alt).not_to receive(:increment_completion)\n end\n end\n\n it \"completes the test\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n metric: :my_metric\n }\n should_finish_experiment :my_experiment\n ab_finished :my_metric\n end\n\n it \"completes all relevant tests\" do\n Split.configuration.experiments = {\n exp_1: {\n alternatives: [ \"1-1\", \"1-2\" ],\n metric: :my_metric\n },\n exp_2: {\n alternatives: [ \"2-1\", \"2-2\" ],\n metric: :another_metric\n },\n exp_3: {\n alternatives: [ \"3-1\", \"3-2\" ],\n metric: :my_metric\n },\n }\n should_finish_experiment :exp_1\n should_finish_experiment :exp_2, false\n should_finish_experiment :exp_3\n ab_finished :my_metric\n end\n\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_exp: 
{\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n resettable: false,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n\n it \"passes through options\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric, reset: false\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n end\n\n describe \"conversions\" do\n it \"should return a conversion rate for an alternative\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(previous_convertion_rate).to eq(0.0)\n\n ab_finished(\"link_color\")\n\n new_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(new_convertion_rate).to eq(1.0)\n end\n end\n\n describe \"active experiments\" do\n it \"should show an active test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show a finished test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n ab_finished(\"def\", { reset: false })\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show an active test when an experiment is on a later version\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(active_experiments.count).to eq 
1\n expect(active_experiments.first[0]).to eq \"link_color\"\n end\n\n it \"should show versioned tests properly\" do\n 10.times { experiment.reset }\n\n alternative = ab_test(experiment.name, \"blue\", \"red\")\n ab_finished(experiment.name, reset: false)\n\n expect(experiment.version).to eq(10)\n expect(active_experiments.count).to eq 1\n expect(active_experiments).to eq({ \"link_color\" => alternative })\n end\n\n it \"should show multiple tests\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 2\n expect(active_experiments[\"def\"]).to eq alternative\n expect(active_experiments[\"ghi\"]).to eq another_alternative\n end\n\n it \"should not show tests with winners\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n e = Split::ExperimentCatalog.find_or_create(\"def\", \"4\", \"5\", \"6\")\n e.winner = \"4\"\n ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"ghi\"\n expect(active_experiments.first[1]).to eq another_alternative\n end\n end\n\n describe \"when user is a robot\" do\n before(:each) do\n @request = OpenStruct.new(user_agent: \"Googlebot/2.1 (+http://www.google.com/bot.html)\")\n end\n\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not create a experiment\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Experiment.new(\"link_color\")).to be_a_new_record\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = 
Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when providing custom ignore logic\" do\n context \"using a proc to configure custom logic\" do\n before(:each) do\n Split.configure do |c|\n c.ignore_filter = proc { |request| true } # ignore everything\n end\n end\n\n it \"ignores the ab_test\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n\n red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n expect((red_count + blue_count)).to be(0)\n end\n end\n end\n\n shared_examples_for \"a disabled test\" do\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n 
new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when ip address is ignored\" do\n context \"individually\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.130\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"for a range\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.129\")\n Split.configure do |c|\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"using both a range and a specific value\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.128\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"when ignored other address\" do\n before do\n @request = OpenStruct.new(ip: \"1.1.1.1\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it \"works as usual\" do\n alternative_name = ab_test(\"link_color\", \"red\", \"blue\")\n expect {\n ab_finished(\"link_color\")\n }.to change(Split::Alternative.new(alternative_name, \"link_color\"), :completed_count).by(1)\n end\n end\n end\n\n describe \"when user is previewing\" do\n before(:each) do\n @request = 
OpenStruct.new(headers: { \"x-purpose\" => \"preview\" })\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n describe \"versioned experiments\" do\n it \"should use version zero if no version is present\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(experiment.version).to eq(0)\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n end\n\n it \"should save the version of the experiment to the session\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n end\n\n it \"should load the experiment even if the version is not 0\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n return_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(return_alternative_name).to eq(alternative_name)\n end\n\n it \"should reset the session of a user on an older version of the experiment\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n new_alternative = Split::Alternative.new(new_alternative_name, \"link_color\")\n expect(new_alternative.participant_count).to eq(1)\n end\n\n it \"should cleanup old versions of experiments from the session\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to 
eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n end\n\n it \"should only count completion of users on the current version\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n Split::Alternative.new(alternative_name, \"link_color\")\n\n experiment.reset\n expect(experiment.version).to eq(1)\n\n ab_finished(\"link_color\")\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.completed_count).to eq(0)\n end\n end\n\n context \"when redis is not available\" do\n before(:each) do\n expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)\n end\n\n context \"and db_failover config option is turned off\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = false\n end\n end\n\n describe \"ab_test\" do\n it \"should raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"finished\" do\n it \"should raise an exception\" do\n expect { ab_finished(\"link_color\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"disable split testing\" do\n before(:each) do\n Split.configure do |config|\n config.enabled = false\n end\n end\n\n it \"should not attempt to connect to redis\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should return control variable\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect { ab_finished(\"link_color\") 
}.not_to raise_error\n end\n end\n end\n\n context \"and db_failover config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = true\n end\n end\n\n describe \"ab_test\" do\n it \"should not raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always use first alternative\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"blue\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/blue\")\n end\n\n context \"and db_failover_allow_parameter_override config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover_allow_parameter_override = true\n end\n end\n\n context \"and given an override parameter\" do\n it \"should use given override instead of the first alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\", \"green\")).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/red\")\n end\n end\n end\n\n context \"and 
preloaded config given\" do\n before do\n Split.configuration.experiments[:link_color] = {\n alternatives: [ \"blue\", \"red\" ],\n }\n end\n\n it \"uses first alternative\" do\n expect(ab_test(:link_color)).to eq(\"blue\")\n end\n end\n end\n\n describe \"finished\" do\n it \"should not raise an exception\" do\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_finished(\"link_color\")\n end\n end\n end\n end\n\n context \"with preloaded config\" do\n before { Split.configuration.experiments = {} }\n\n it \"pulls options from config file\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n ab_test :my_experiment\n expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(Split::Experiment.new(:my_experiment).goals).to eq([ \"goal1\", \"goal2\" ])\n end\n\n it \"can be called multiple times\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n 5.times { ab_test :my_experiment }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\" ])\n expect(experiment.participant_count).to eq(1)\n end\n\n it \"accepts multiple goals\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [ \"goal1\", \"goal2\", \"goal3\" ]\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n 
expect(experiment.goals).to eq([ \"goal1\", \"goal2\", \"goal3\" ])\n end\n\n it \"allow specifying goals to be optional\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ]\n }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([])\n end\n\n it \"accepts multiple alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"second_opt\", \"third_opt\" ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"second_opt\", \"third_opt\" ])\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([[\"control_opt\", 0.67], [\"second_opt\", 0.1], [\"third_opt\", 0.23]])\n end\n\n it \"accepts probability on some alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.34], [\"second_opt\", 0.215], [\"third_opt\", 0.23], [\"fourth_opt\", 0.215]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"allows name param without probability\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\" },\n \"second_opt\",\n { name: \"third_opt\", 
percent: 64 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.18], [\"second_opt\", 0.18], [\"third_opt\", 0.64]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"fails gracefully if config is missing experiment\" do\n Split.configuration.experiments = { other_experiment: { foo: \"Bar\" } }\n expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)\n end\n\n it \"fails gracefully if config is missing\" do\n expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"fails gracefully if config is missing alternatives\" do\n Split.configuration.experiments[:my_experiment] = { foo: \"Bar\" }\n expect { ab_test :my_experiment }.to raise_error(NoMethodError)\n end\n end\n\n it \"should handle multiple experiments correctly\" do\n experiment2 = Split::ExperimentCatalog.find_or_create(\"link_color2\", \"blue\", \"red\")\n ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"link_color2\", \"blue\", \"red\")\n ab_finished(\"link_color2\")\n\n experiment2.alternatives.each do |alt|\n expect(alt.unfinished_count).to eq(0)\n end\n end\n\n context \"with goals\" do\n before do\n @experiment = { \"link_color\" => [\"purchase\", \"refund\"] }\n @alternatives = [\"blue\", \"red\"]\n @experiment_name, @goals = normalize_metric(@experiment)\n @goal1 = @goals[0]\n @goal2 = @goals[1]\n end\n\n it \"should normalize experiment\" do\n expect(@experiment_name).to eq(\"link_color\")\n expect(@goals).to eq([\"purchase\", \"refund\"])\n end\n\n describe \"ab_test\" do\n it \"should allow experiment goals interface as a single hash\" do\n ab_test(@experiment, *@alternatives)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n 
end\n end\n\n describe \"ab_finished\" do\n before do\n @alternative_name = ab_test(@experiment, *@alternatives)\n end\n\n it \"should increment the counter for the specified-goal completed alternative\" do\n expect { ab_finished({ \"link_color\" => [\"purchase\"] }) }\n .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)\n .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)\n end\n end\n end\nend\n\n Allow parameter overrides, even without Redis.\n\nWith this option, applications can be tested to ensure that the options\nrender correctly, even without Redis installed and running on the\ndevelopment machines.\n\n @@ -410,6 +410,27 @@ describe Split::Helper do\n \"shared/#{alternative}\"\n end.should eq('shared/blue')\n end\n+\n+ context 'and db_failover_allow_parameter_override config option is turned on' do\n+ before(:each) do\n+ Split.configure do |config|\n+ config.db_failover_allow_parameter_override = true\n+ end\n+ end\n+\n+ context 'and given an override parameter' do\n+ it 'should use given override instead of the first alternative' do\n+ @params = {'link_color' => 'red'}\n+ ab_test('link_color', 'blue', 'red').should eq('red')\n+ ab_test('link_color', 'blue', 'red', 'green').should eq('red')\n+ ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2).should eq('red')\n+ ab_test('link_color', {'blue' => 0.8}, {'red' => 20}).should eq('red')\n+ ab_test('link_color', 'blue', 'red') do |alternative|\n+ \"shared/#{alternative}\"\n+ end.should eq('shared/red')\n+ end\n+ end\n+ end\n end\n \n describe 'finished' do\n"},"addition_count":{"kind":"number","value":21,"string":"21"},"commit_subject":{"kind":"string","value":"Allow parameter overrides, even without 
Redis."},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675191,"cells":{"id":{"kind":"string","value":"10070841"},"text":{"kind":"string","value":" helper_spec.rb\n # frozen_string_literal: true\n\nrequire \"spec_helper\"\n\n# TODO change some of these tests to use Rack::Test\n\ndescribe Split::Helper do\n include Split::Helper\n\n let(:experiment) {\n Split::ExperimentCatalog.find_or_create(\"link_color\", \"blue\", \"red\")\n }\n\n describe \"ab_test\" do\n it \"should not raise an error when passed strings for alternatives\" do\n expect { ab_test(\"xyz\", \"1\", \"2\", \"3\") }.not_to raise_error\n end\n\n it \"should not raise an error when passed an array for alternatives\" do\n expect { ab_test(\"xyz\", [\"1\", \"2\", \"3\"]) }.not_to raise_error\n end\n\n it \"should raise the appropriate error when passed integers for alternatives\" do\n expect { ab_test(\"xyz\", 1, 2, 3) }.to raise_error(ArgumentError)\n end\n\n it \"should raise the appropriate error when passed symbols for alternatives\" do\n expect { ab_test(\"xyz\", :a, :b, :c) }.to raise_error(ArgumentError)\n end\n\n it \"should not raise error when passed an array for goals\" do\n expect { ab_test({ \"link_color\" => [\"purchase\", \"refund\"] }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should not raise error when passed just one goal\" do\n expect { ab_test({ \"link_color\" => \"purchase\" }, \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"raises an appropriate error when processing combined expirements\" do\n Split.configuration.experiments = {\n combined_exp_1: {\n alternatives: [ { name: \"control\", percent: 50 }, { name: \"test-alt\", percent: 50 } ],\n metric: :my_metric,\n combined_experiments: [:combined_exp_1_sub_1]\n }\n }\n 
Split::ExperimentCatalog.find_or_create(\"combined_exp_1\")\n expect { ab_test(\"combined_exp_1\") }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"should assign a random alternative to a new user when there are an equal number of alternatives assigned\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should increment the participation counter after assignment to a new user\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1)\n end\n\n it \"should not increment the counter for an experiment that the user is not participating in\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n # User shouldn't participate in this second experiment\n ab_test(\"button_size\", \"small\", \"big\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should not increment the counter for an not started experiment\" do\n expect(Split.configuration).to receive(:start_manually).and_return(true)\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n expect {\n a = ab_test(\"button_size\", \"small\", \"big\")\n 
expect(a).to eq(\"small\")\n }.not_to change { e.participant_count }\n end\n\n it \"should return the given alternative for an existing user\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always return the winner if one is present\" do\n experiment.winner = \"orange\"\n\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"orange\")\n end\n\n it \"should allow the alternative to be forced by passing it in the params\" do\n # ?ab_test[link_color]=blue\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"red\")\n\n alternative = ab_test(\"link_color\", { \"blue\" => 5 }, \"red\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not allow an arbitrary alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"pink\" } }\n alternative = ab_test(\"link_color\", \"blue\")\n expect(alternative).to eq(\"blue\")\n end\n\n it \"should not store the split when a param forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"SPLIT_DISABLE query parameter should also force the alternative (uses control)\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", { \"blue\" => 1 }, \"red\" => 5)\n expect(alternative).to eq(\"blue\")\n alternative = ab_test(\"link_color\", \"red\", \"blue\")\n expect(alternative).to eq(\"red\")\n alternative = 
ab_test(\"link_color\", { \"red\" => 5 }, \"blue\" => 1)\n expect(alternative).to eq(\"red\")\n end\n\n it \"should not store the split when Split generically disabled\" do\n @params = { \"SPLIT_DISABLE\" => \"true\" }\n expect(ab_user).not_to receive(:[]=)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n context \"when store_override is set\" do\n before { Split.configuration.store_override = true }\n\n it \"should store the forced alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"blue\" } }\n expect(ab_user).to receive(:[]=).with(\"link_color\", \"blue\")\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n context \"when on_trial_choose is set\" do\n before { Split.configuration.on_trial_choose = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n end\n\n it \"should allow passing a block\" do\n alt = ab_test(\"link_color\", \"blue\", \"red\")\n ret = ab_test(\"link_color\", \"blue\", \"red\") { |alternative| \"shared/#{alternative}\" }\n expect(ret).to eq(\"shared/#{alt}\")\n end\n\n it \"should allow the share of visitors see an alternative to be specified\" do\n ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })\n expect([\"red\", \"blue\"]).to include(ab_user[\"link_color\"])\n end\n\n it \"should allow alternative weighting interface as a single hash\" do\n ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.alternatives.map(&:name)).to eq([\"blue\", \"red\"])\n expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2])\n end\n\n it \"should only let a user participate in one experiment at a time\" do\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n big = Split::Alternative.new(\"big\", 
\"button_size\")\n expect(big.participant_count).to eq(0)\n small = Split::Alternative.new(\"small\", \"button_size\")\n expect(small.participant_count).to eq(0)\n end\n\n it \"should let a user participate in many experiment with allow_multiple_experiments option\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n link_color = ab_test(\"link_color\", \"blue\", \"red\")\n button_size = ab_test(\"button_size\", \"small\", \"big\")\n expect(ab_user[\"link_color\"]).to eq(link_color)\n expect(ab_user[\"button_size\"]).to eq(button_size)\n button_size_alt = Split::Alternative.new(button_size, \"button_size\")\n expect(button_size_alt.participant_count).to eq(1)\n end\n\n context \"with allow_multiple_experiments = 'control'\" do\n it \"should let a user participate in many experiment with one non-'control' alternative\" do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n groups = 100.times.map do |n|\n ab_test(\"test#{n}\".to_sym, { \"control\" => (100 - n) }, { \"test#{n}-alt\" => n })\n end\n\n experiments = ab_user.active_experiments\n expect(experiments.size).to be > 1\n\n count_control = experiments.values.count { |g| g == \"control\" }\n expect(count_control).to eq(experiments.size - 1)\n\n count_alts = groups.count { |g| g != \"control\" }\n expect(count_alts).to eq(1)\n end\n\n context \"when user already has experiment\" do\n let(:mock_user) { Split::User.new(self, { \"test_0\" => \"test-alt\" }) }\n\n before do\n Split.configure do |config|\n config.allow_multiple_experiments = \"control\"\n end\n\n Split::ExperimentCatalog.find_or_initialize(\"test_0\", \"control\", \"test-alt\").save\n Split::ExperimentCatalog.find_or_initialize(\"test_1\", \"control\", \"test-alt\").save\n end\n\n it \"should restore previously selected alternative\" do\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n 
expect(ab_test(:test_0, { \"control\" => 1 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"should select the correct alternatives after experiment resets\" do\n experiment = Split::ExperimentCatalog.find(:test_0)\n experiment.reset\n mock_user[experiment.key] = \"test-alt\"\n\n expect(ab_user.active_experiments.size).to eq 1\n expect(ab_test(:test_0, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"test-alt\"\n end\n\n it \"lets override existing choice\" do\n pending \"this requires user store reset on first call not depending on whelther it is current trial\"\n @params = { \"ab_test\" => { \"test_1\" => \"test-alt\" } }\n\n expect(ab_test(:test_0, { \"control\" => 0 }, { \"test-alt\" => 100 })).to eq \"control\"\n expect(ab_test(:test_1, { \"control\" => 100 }, { \"test-alt\" => 1 })).to eq \"test-alt\"\n end\n end\n end\n\n it \"should not over-write a finished key when an experiment is on a later version\" do\n experiment.increment_version\n ab_user = { experiment.key => \"blue\", experiment.finished_key => true }\n finished_session = ab_user.dup\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user).to eq(finished_session)\n end\n end\n\n describe \"metadata\" do\n context \"is defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: { \"one\" => \"Meta1\", \"two\" => \"Meta2\" }\n }\n }\n end\n\n it \"should be passed to helper block\" do\n @params = { \"ab_test\" => { \"my_experiment\" => \"two\" } }\n expect(ab_test(\"my_experiment\")).to eq \"two\"\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq(\"Meta2\")\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\")).to eq \"one\"\n 
expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq(\"Meta1\")\n end\n end\n\n context \"is not defined\" do\n before do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n metadata: nil\n }\n }\n end\n\n it \"should be passed to helper block\" do\n expect(ab_test(\"my_experiment\") do |alternative, meta|\n meta\n end).to eq({})\n end\n\n it \"should pass control metadata helper block if library disabled\" do\n Split.configure do |config|\n config.enabled = false\n end\n\n expect(ab_test(\"my_experiment\") do |_, meta|\n meta\n end).to eq({})\n end\n end\n end\n\n describe \"ab_finished\" do\n context \"for an experiment that the user participates in\" do\n before(:each) do\n @experiment_name = \"link_color\"\n @alternatives = [\"blue\", \"red\"]\n @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives)\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n end\n\n it \"should increment the counter for the completed alternative\" do\n ab_finished(@experiment_name)\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should set experiment's finished key if reset is false\" do\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should not increment the counter if reset is false and the experiment has been already finished\" do\n 2.times { ab_finished(@experiment_name, { reset: false }) }\n new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count\n expect(new_completion_count).to eq(@previous_completion_count + 1)\n end\n\n it \"should not 
increment the counter for an ended experiment\" do\n e = Split::ExperimentCatalog.find_or_create(\"button_size\", \"small\", \"big\")\n e.winner = \"small\"\n a = ab_test(\"button_size\", \"small\", \"big\")\n expect(a).to eq(\"small\")\n expect {\n ab_finished(\"button_size\")\n }.not_to change { Split::Alternative.new(a, \"button_size\").completed_count }\n end\n\n it \"should clear out the user's participation from their session\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should not clear out the users session if reset is false\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name, { reset: false })\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n expect(ab_user[@experiment.finished_key]).to eq(true)\n end\n\n it \"should reset the users session when experiment is not versioned\" do\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n it \"should reset the users session when experiment is versioned\" do\n @experiment.increment_version\n @alternative_name = ab_test(@experiment_name, *@alternatives)\n\n expect(ab_user[@experiment.key]).to eq(@alternative_name)\n ab_finished(@experiment_name)\n expect(ab_user.keys).to be_empty\n end\n\n context \"when on_trial_complete is set\" do\n before { Split.configuration.on_trial_complete = :some_method }\n it \"should call the method\" do\n expect(self).to receive(:some_method)\n ab_finished(@experiment_name)\n \"shared/#{alternative}\"\n end.should eq('shared/blue')\n end\n end\n\n describe 'finished' do\n end\n\n context \"for an experiment that the user does not participate in\" do\n before do\n Split::ExperimentCatalog.find_or_create(:not_started_experiment, \"control\", \"alt\")\n end\n it \"should not raise an exception\" do\n expect { ab_finished(:not_started_experiment) }.not_to 
raise_exception\n end\n\n it \"should not change the user state when reset is false\" do\n expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([])\n end\n\n it \"should not change the user state when reset is true\" do\n expect(self).not_to receive(:reset!)\n ab_finished(:not_started_experiment)\n end\n\n it \"should not increment the completed counter\" do\n ab_finished(:not_started_experiment)\n expect(Split::Alternative.new(\"control\", :not_started_experiment).completed_count).to eq(0)\n expect(Split::Alternative.new(\"alt\", :not_started_experiment).completed_count).to eq(0)\n end\n end\n end\n\n context \"finished with config\" do\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_experiment: {\n alternatives: [\"one\", \"two\"],\n resettable: false,\n }\n }\n alternative = ab_test(:my_experiment)\n experiment = Split::ExperimentCatalog.find :my_experiment\n\n ab_finished :my_experiment\n expect(ab_user[experiment.key]).to eq(alternative)\n expect(ab_user[experiment.finished_key]).to eq(true)\n end\n end\n\n context \"finished with metric name\" do\n before { Split.configuration.experiments = {} }\n before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original }\n\n def should_finish_experiment(experiment_name, should_finish = true)\n alts = Split.configuration.experiments[experiment_name][:alternatives]\n experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts)\n alt_name = ab_user[experiment.key] = alts.first\n alt = double(\"alternative\")\n expect(alt).to receive(:name).at_most(1).times.and_return(alt_name)\n expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt)\n if should_finish\n expect(alt).to receive(:increment_completion).at_most(1).times\n else\n expect(alt).not_to receive(:increment_completion)\n end\n end\n\n it \"completes the test\" do\n 
Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n metric: :my_metric\n }\n should_finish_experiment :my_experiment\n ab_finished :my_metric\n end\n\n it \"completes all relevant tests\" do\n Split.configuration.experiments = {\n exp_1: {\n alternatives: [ \"1-1\", \"1-2\" ],\n metric: :my_metric\n },\n exp_2: {\n alternatives: [ \"2-1\", \"2-2\" ],\n metric: :another_metric\n },\n exp_3: {\n alternatives: [ \"3-1\", \"3-2\" ],\n metric: :my_metric\n },\n }\n should_finish_experiment :exp_1\n should_finish_experiment :exp_2, false\n should_finish_experiment :exp_3\n ab_finished :my_metric\n end\n\n it \"passes reset option\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n resettable: false,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n\n it \"passes through options\" do\n Split.configuration.experiments = {\n my_exp: {\n alternatives: [\"one\", \"two\"],\n metric: :my_metric,\n }\n }\n alternative_name = ab_test(:my_exp)\n exp = Split::ExperimentCatalog.find :my_exp\n\n ab_finished :my_metric, reset: false\n expect(ab_user[exp.key]).to eq(alternative_name)\n expect(ab_user[exp.finished_key]).to be_truthy\n end\n end\n\n describe \"conversions\" do\n it \"should return a conversion rate for an alternative\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(previous_convertion_rate).to eq(0.0)\n\n ab_finished(\"link_color\")\n\n new_convertion_rate = Split::Alternative.new(alternative_name, \"link_color\").conversion_rate\n expect(new_convertion_rate).to eq(1.0)\n end\n end\n\n describe \"active experiments\" do\n it \"should show an active 
test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show a finished test\" do\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n ab_finished(\"def\", { reset: false })\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"def\"\n expect(active_experiments.first[1]).to eq alternative\n end\n\n it \"should show an active test when an experiment is on a later version\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"link_color\"\n end\n\n it \"should show versioned tests properly\" do\n 10.times { experiment.reset }\n\n alternative = ab_test(experiment.name, \"blue\", \"red\")\n ab_finished(experiment.name, reset: false)\n\n expect(experiment.version).to eq(10)\n expect(active_experiments.count).to eq 1\n expect(active_experiments).to eq({ \"link_color\" => alternative })\n end\n\n it \"should show multiple tests\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n alternative = ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 2\n expect(active_experiments[\"def\"]).to eq alternative\n expect(active_experiments[\"ghi\"]).to eq another_alternative\n end\n\n it \"should not show tests with winners\" do\n Split.configure do |config|\n config.allow_multiple_experiments = true\n end\n e = Split::ExperimentCatalog.find_or_create(\"def\", \"4\", \"5\", \"6\")\n e.winner = \"4\"\n ab_test(\"def\", \"4\", \"5\", \"6\")\n another_alternative = ab_test(\"ghi\", \"7\", \"8\", \"9\")\n expect(active_experiments.count).to eq 1\n expect(active_experiments.first[0]).to eq \"ghi\"\n 
expect(active_experiments.first[1]).to eq another_alternative\n end\n end\n\n describe \"when user is a robot\" do\n before(:each) do\n @request = OpenStruct.new(user_agent: \"Googlebot/2.1 (+http://www.google.com/bot.html)\")\n end\n\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not create a experiment\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n expect(Split::Experiment.new(\"link_color\")).to be_a_new_record\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when providing custom ignore logic\" do\n context \"using a proc to configure custom logic\" do\n before(:each) do\n Split.configure do |c|\n c.ignore_filter = proc { |request| true } # ignore everything\n end\n end\n\n it \"ignores the ab_test\" do\n ab_test(\"link_color\", \"blue\", \"red\")\n\n red_count = Split::Alternative.new(\"red\", 
\"link_color\").participant_count\n blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n expect((red_count + blue_count)).to be(0)\n end\n end\n end\n\n shared_examples_for \"a disabled test\" do\n describe \"ab_test\" do\n it \"should return the control\" do\n alternative = ab_test(\"link_color\", \"blue\", \"red\")\n expect(alternative).to eq experiment.control.name\n end\n\n it \"should not increment the participation count\" do\n previous_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n previous_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n ab_test(\"link_color\", \"blue\", \"red\")\n\n new_red_count = Split::Alternative.new(\"red\", \"link_color\").participant_count\n new_blue_count = Split::Alternative.new(\"blue\", \"link_color\").participant_count\n\n expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count)\n end\n end\n\n describe \"finished\" do\n it \"should not increment the completed count\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n\n previous_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n ab_finished(\"link_color\")\n\n new_completion_count = Split::Alternative.new(alternative_name, \"link_color\").completed_count\n\n expect(new_completion_count).to eq(previous_completion_count)\n end\n end\n end\n\n describe \"when ip address is ignored\" do\n context \"individually\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.130\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"for a range\" do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.129\")\n Split.configure do |c|\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"using both a range and a specific value\" 
do\n before(:each) do\n @request = OpenStruct.new(ip: \"81.19.48.128\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n c.ignore_ip_addresses << /81\\.19\\.48\\.[0-9]+/\n end\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n context \"when ignored other address\" do\n before do\n @request = OpenStruct.new(ip: \"1.1.1.1\")\n Split.configure do |c|\n c.ignore_ip_addresses << \"81.19.48.130\"\n end\n end\n\n it \"works as usual\" do\n alternative_name = ab_test(\"link_color\", \"red\", \"blue\")\n expect {\n ab_finished(\"link_color\")\n }.to change(Split::Alternative.new(alternative_name, \"link_color\"), :completed_count).by(1)\n end\n end\n end\n\n describe \"when user is previewing\" do\n before(:each) do\n @request = OpenStruct.new(headers: { \"x-purpose\" => \"preview\" })\n end\n\n it_behaves_like \"a disabled test\"\n end\n\n describe \"versioned experiments\" do\n it \"should use version zero if no version is present\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(experiment.version).to eq(0)\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n end\n\n it \"should save the version of the experiment to the session\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n end\n\n it \"should load the experiment even if the version is not 0\" do\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(alternative_name)\n return_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(return_alternative_name).to eq(alternative_name)\n end\n\n it \"should reset the session of a user on an older version of the experiment\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n 
alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n new_alternative = Split::Alternative.new(new_alternative_name, \"link_color\")\n expect(new_alternative.participant_count).to eq(1)\n end\n\n it \"should cleanup old versions of experiments from the session\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(1)\n\n experiment.reset\n expect(experiment.version).to eq(1)\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.participant_count).to eq(0)\n\n new_alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color:1\"]).to eq(new_alternative_name)\n end\n\n it \"should only count completion of users on the current version\" do\n alternative_name = ab_test(\"link_color\", \"blue\", \"red\")\n expect(ab_user[\"link_color\"]).to eq(alternative_name)\n Split::Alternative.new(alternative_name, \"link_color\")\n\n experiment.reset\n expect(experiment.version).to eq(1)\n\n ab_finished(\"link_color\")\n alternative = Split::Alternative.new(alternative_name, \"link_color\")\n expect(alternative.completed_count).to eq(0)\n end\n end\n\n context \"when redis is not available\" do\n before(:each) do\n expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new)\n end\n\n context \"and db_failover config option is turned off\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = false\n 
end\n end\n\n describe \"ab_test\" do\n it \"should raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"finished\" do\n it \"should raise an exception\" do\n expect { ab_finished(\"link_color\") }.to raise_error(Errno::ECONNREFUSED)\n end\n end\n\n describe \"disable split testing\" do\n before(:each) do\n Split.configure do |config|\n config.enabled = false\n end\n end\n\n it \"should not attempt to connect to redis\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should return control variable\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n end\n end\n\n context \"and db_failover config option is turned on\" do\n before(:each) do\n Split.configure do |config|\n config.db_failover = true\n end\n end\n\n describe \"ab_test\" do\n it \"should not raise an exception\" do\n expect { ab_test(\"link_color\", \"blue\", \"red\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_test(\"link_color\", \"blue\", \"red\")\n end\n\n it \"should always use first alternative\" do\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"blue\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"blue\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/blue\")\n end\n\n context \"and db_failover_allow_parameter_override config option is turned on\" do\n before(:each) do\n 
Split.configure do |config|\n config.db_failover_allow_parameter_override = true\n end\n end\n\n context \"and given an override parameter\" do\n it \"should use given override instead of the first alternative\" do\n @params = { \"ab_test\" => { \"link_color\" => \"red\" } }\n expect(ab_test(\"link_color\", \"blue\", \"red\")).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\", \"green\")).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.01 }, \"red\" => 0.2)).to eq(\"red\")\n expect(ab_test(\"link_color\", { \"blue\" => 0.8 }, { \"red\" => 20 })).to eq(\"red\")\n expect(ab_test(\"link_color\", \"blue\", \"red\") do |alternative|\n \"shared/#{alternative}\"\n end).to eq(\"shared/red\")\n end\n end\n end\n\n context \"and preloaded config given\" do\n before do\n Split.configuration.experiments[:link_color] = {\n alternatives: [ \"blue\", \"red\" ],\n }\n end\n\n it \"uses first alternative\" do\n expect(ab_test(:link_color)).to eq(\"blue\")\n end\n end\n end\n\n describe \"finished\" do\n it \"should not raise an exception\" do\n expect { ab_finished(\"link_color\") }.not_to raise_error\n end\n\n it \"should call db_failover_on_db_error proc with error as parameter\" do\n Split.configure do |config|\n config.db_failover_on_db_error = proc do |error|\n expect(error).to be_a(Errno::ECONNREFUSED)\n end\n end\n\n expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original\n ab_finished(\"link_color\")\n end\n end\n end\n end\n\n context \"with preloaded config\" do\n before { Split.configuration.experiments = {} }\n\n it \"pulls options from config file\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n ab_test :my_experiment\n expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(Split::Experiment.new(:my_experiment).goals).to eq([ \"goal1\", 
\"goal2\" ])\n end\n\n it \"can be called multiple times\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [\"goal1\", \"goal2\"]\n }\n 5.times { ab_test :my_experiment }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"other_opt\" ])\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\" ])\n expect(experiment.participant_count).to eq(1)\n end\n\n it \"accepts multiple goals\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ],\n goals: [ \"goal1\", \"goal2\", \"goal3\" ]\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([ \"goal1\", \"goal2\", \"goal3\" ])\n end\n\n it \"allow specifying goals to be optional\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"other_opt\" ]\n }\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.goals).to eq([])\n end\n\n it \"accepts multiple alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [ \"control_opt\", \"second_opt\", \"third_opt\" ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.map(&:name)).to eq([ \"control_opt\", \"second_opt\", \"third_opt\" ])\n end\n\n it \"accepts probability on alternatives\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 67 },\n { name: \"second_opt\", percent: 10 },\n { name: \"third_opt\", percent: 23 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([[\"control_opt\", 0.67], [\"second_opt\", 0.1], [\"third_opt\", 0.23]])\n end\n\n it \"accepts probability on some alternatives\" do\n 
Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\", percent: 34 },\n \"second_opt\",\n { name: \"third_opt\", percent: 23 },\n \"fourth_opt\",\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.34], [\"second_opt\", 0.215], [\"third_opt\", 0.23], [\"fourth_opt\", 0.215]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"allows name param without probability\" do\n Split.configuration.experiments[:my_experiment] = {\n alternatives: [\n { name: \"control_opt\" },\n \"second_opt\",\n { name: \"third_opt\", percent: 64 },\n ],\n }\n ab_test :my_experiment\n experiment = Split::Experiment.new(:my_experiment)\n names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] }\n expect(names_and_weights).to eq([[\"control_opt\", 0.18], [\"second_opt\", 0.18], [\"third_opt\", 0.64]])\n expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0)\n end\n\n it \"fails gracefully if config is missing experiment\" do\n Split.configuration.experiments = { other_experiment: { foo: \"Bar\" } }\n expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound)\n end\n\n it \"fails gracefully if config is missing\" do\n expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError)\n end\n\n it \"fails gracefully if config is missing alternatives\" do\n Split.configuration.experiments[:my_experiment] = { foo: \"Bar\" }\n expect { ab_test :my_experiment }.to raise_error(NoMethodError)\n end\n end\n\n it \"should handle multiple experiments correctly\" do\n experiment2 = Split::ExperimentCatalog.find_or_create(\"link_color2\", \"blue\", \"red\")\n ab_test(\"link_color\", \"blue\", \"red\")\n ab_test(\"link_color2\", \"blue\", \"red\")\n 
ab_finished(\"link_color2\")\n\n experiment2.alternatives.each do |alt|\n expect(alt.unfinished_count).to eq(0)\n end\n end\n\n context \"with goals\" do\n before do\n @experiment = { \"link_color\" => [\"purchase\", \"refund\"] }\n @alternatives = [\"blue\", \"red\"]\n @experiment_name, @goals = normalize_metric(@experiment)\n @goal1 = @goals[0]\n @goal2 = @goals[1]\n end\n\n it \"should normalize experiment\" do\n expect(@experiment_name).to eq(\"link_color\")\n expect(@goals).to eq([\"purchase\", \"refund\"])\n end\n\n describe \"ab_test\" do\n it \"should allow experiment goals interface as a single hash\" do\n ab_test(@experiment, *@alternatives)\n experiment = Split::ExperimentCatalog.find(\"link_color\")\n expect(experiment.goals).to eq([\"purchase\", \"refund\"])\n end\n end\n\n describe \"ab_finished\" do\n before do\n @alternative_name = ab_test(@experiment, *@alternatives)\n end\n\n it \"should increment the counter for the specified-goal completed alternative\" do\n expect { ab_finished({ \"link_color\" => [\"purchase\"] }) }\n .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0)\n .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1)\n end\n end\n end\nend\n\n Allow parameter overrides, even without Redis.\n\nWith this option, applications can be tested to ensure that the options\nrender correctly, even without Redis installed and running on the\ndevelopment machines.\n\n @@ -410,6 +410,27 @@ describe Split::Helper do\n \"shared/#{alternative}\"\n end.should eq('shared/blue')\n end\n+\n+ context 'and db_failover_allow_parameter_override config option is turned on' do\n+ before(:each) do\n+ Split.configure do |config|\n+ config.db_failover_allow_parameter_override = true\n+ end\n+ end\n+\n+ context 'and given an override parameter' do\n+ it 'should use given override instead of the first alternative' do\n+ @params = {'link_color' => 'red'}\n+ 
ab_test('link_color', 'blue', 'red').should eq('red')\n+ ab_test('link_color', 'blue', 'red', 'green').should eq('red')\n+ ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2).should eq('red')\n+ ab_test('link_color', {'blue' => 0.8}, {'red' => 20}).should eq('red')\n+ ab_test('link_color', 'blue', 'red') do |alternative|\n+ \"shared/#{alternative}\"\n+ end.should eq('shared/red')\n+ end\n+ end\n+ end\n end\n \n describe 'finished' do\n"},"addition_count":{"kind":"number","value":21,"string":"21"},"commit_subject":{"kind":"string","value":"Allow parameter overrides, even without Redis."},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675192,"cells":{"id":{"kind":"string","value":"10070842"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)\n\n> 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired by the 
[Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\n### Rails\n\nAdding `gem 'split'` to your Gemfile will autoloaded it when rails starts up, as long as you've configured redis it will 'just work'.\n\n### Sinatra\n\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. 
Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. 
You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. 
In case you would like to start new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. 
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? 
},\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be 
called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. 
default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, 
and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. 
THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments\n```ruby\n ab_combined_test(:button_color_experiment)\n```\nFinish each combined test as normal\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```\n helpers Split::CombinedExperimentsHelper\n ```\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. 
This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n```ruby\nSplit.configuration.cache = true\n```\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n   ```ruby\n   gem 'redis-namespace'\n   ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. 
The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). 
By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n\n Merge pull request #358 from andreibondarev/fix-readme-typo\n\nFix typo in README\n @@ -36,7 +36,7 @@ gem install split\n \n ### Rails\n \n-Adding `gem 'split'` to your Gemfile will autoloaded it when rails starts up, as long as you've configured redis it will 'just work'.\n+Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured redis it will 'just work'.\n \n ### Sinatra\n \n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #358 from andreibondarev/fix-readme-typo"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675193,"cells":{"id":{"kind":"string","value":"10070843"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)\n\n> 📈 The Rack Based A/B testing framework 
https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\n### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured redis it will 'just work'.\n\n### Sinatra\n\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods. 
Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. 
You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. 
In case you would like to start new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. 
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n  config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, set the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n  config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n  config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? 
},\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be 
called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. 
default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, 
and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. 
THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments\n```ruby\n ab_combined_test(:button_color_experiment)\n```\nFinish each combined test as normal\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```\n helpers Split::CombinedExperimentsHelper\n ```\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. 
This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n```ruby\nSplit.configuration.cache = true\n```\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. 
The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). 
By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n\n Merge pull request #358 from andreibondarev/fix-readme-typo\n\nFix typo in README\n @@ -36,7 +36,7 @@ gem install split\n \n ### Rails\n \n-Adding `gem 'split'` to your Gemfile will autoloaded it when rails starts up, as long as you've configured redis it will 'just work'.\n+Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured redis it will 'just work'.\n \n ### Sinatra\n \n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #358 from andreibondarev/fix-readme-typo"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675194,"cells":{"id":{"kind":"string","value":"10070844"},"text":{"kind":"string","value":" README.md\n # [Split](https://libraries.io/rubygems/split)\n\n[![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split)\n![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main)\n[![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split)\n[![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage)\n[![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)\n[![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split)\n\n> 📈 The Rack Based A/B testing framework 
https://libraries.io/rubygems/split\n\nSplit is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app.\n\nSplit is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis.\n\nSplit is designed to be hacker friendly, allowing for maximum customisation and extensibility.\n\n## Install\n\n### Requirements\n\nSplit v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2.\n\nIf your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3)\n\nSplit uses Redis as a datastore.\n\nSplit only supports Redis 4.0 or greater.\n\nIf you're on OS X, Homebrew is the simplest way to install Redis:\n\n```bash\nbrew install redis\nredis-server /usr/local/etc/redis.conf\n```\n\n### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured redis it will 'just work'.\n\n### Sinatra\n\n```\n\n#### Rails\n\nAdding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'.\n\n#### Sinatra\n\nTo configure Sinatra with Split you need to enable sessions and mix in the helper methods.
Add the following lines at the top of your Sinatra app:\n\n```ruby\nrequire 'split'\n\nclass MySinatraApp < Sinatra::Base\n enable :sessions\n helpers Split::Helper\n\n get '/' do\n ...\nend\n```\n\n## Usage\n\nTo begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments.\n\n`ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on.\n\nIt can be used to render different templates, show different text or any other case based logic.\n\n`ab_finished` is used to make a completion of an experiment, or conversion.\n\nExample: View\n\n```erb\n<% ab_test(:login_button, \"/images/button1.jpg\", \"/images/button2.jpg\") do |button_file| %>\n <%= image_tag(button_file, alt: \"Login!\") %>\n<% end %>\n```\n\nExample: Controller\n\n```ruby\ndef register_new_user\n # See what level of free points maximizes users' decision to buy replacement points.\n @starter_points = ab_test(:new_user_free_points, '100', '200', '300')\nend\n```\n\nExample: Conversion tracking (in a controller!)\n\n```ruby\ndef buy_new_points\n # some business logic\n ab_finished(:new_user_free_points)\nend\n```\n\nExample: Conversion tracking (in a view)\n\n```erb\nThanks for signing up, dude! <% ab_finished(:signup_page_redesign) %>\n```\n\nYou can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki).\n\n## Statistical Validity\n\nSplit has two options for you to use to determine which alternative is the best.\n\nThe first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch.\n\nAs per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none).\n\n[Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience.\n\nThe second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled \"Confidence.\" This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test.\n\nCalculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. 
You can specify how often they should be recalculated (the default is once per day).\n\n```ruby\nSplit.configure do |config|\n config.winning_alternative_recalculation_interval = 3600 # 1 hour\nend\n```\n\n## Extras\n\n### Weighted alternatives\n\nPerhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested.\n\nTo do this you can pass a weight with each alternative in the following ways:\n\n```ruby\nab_test(:homepage_design, {'Old' => 18}, {'New' => 2})\n\nab_test(:homepage_design, 'Old', {'New' => 1.0/9})\n\nab_test(:homepage_design, {'Old' => 9}, 'New')\n```\n\nThis will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.\n\n### Overriding alternatives\n\nFor development and testing, you may wish to force your app to always return an alternative.\nYou can do this by passing it as a parameter in the url.\n\nIf you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:\n\n http://myawesomesite.com?ab_test[button_color]=red\n\nwill always have red buttons. 
This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option.\n\nIn the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.\n\n http://myawesomesite.com?SPLIT_DISABLE=true\n\nIt is not required to send `SPLIT_DISABLE=false` to activate Split.\n\n\n### Rspec Helper\nTo aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:\n\n```ruby\n# Create a file with these contents at 'spec/support/split_helper.rb'\n# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb\nmodule SplitHelper\n\n # Force a specific experiment alternative to always be returned:\n # use_ab_test(signup_form: \"single_page\")\n #\n # Force alternatives for multiple experiments:\n # use_ab_test(signup_form: \"single_page\", pricing: \"show_enterprise_prices\")\n #\n def use_ab_test(alternatives_by_experiment)\n allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|\n variant = alternatives_by_experiment.fetch(experiment) { |key| raise \"Unknown experiment '#{key}'\" }\n block.call(variant) unless block.nil?\n variant\n end\n end\nend\n\n# Make the `use_ab_test` method available to all specs:\nRSpec.configure do |config|\n config.include SplitHelper\nend\n```\n\nNow you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example:\n```ruby\nit \"registers using experimental signup\" do\n use_ab_test experiment_name: \"alternative_name\"\n post \"/signups\"\n ...\nend\n```\n\n\n### Starting experiments manually\n\nBy default new A/B tests will be active right after deployment. 
In case you would like to start new test a while after\nthe deploy, you can do it by setting the `start_manually` configuration option to `true`.\n\nAfter choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized.\n\n### Reset after completion\n\nWhen a user completes a test their session is reset so that they may start the test again in the future.\n\nTo stop this behaviour you can pass the following option to the `ab_finished` method:\n\n```ruby\nab_finished(:experiment_name, reset: false)\n```\n\nThe user will then always see the alternative they started with.\n\nAny old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`.\n\n### Reset experiments manually\n\nBy default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`.\n\nYou may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.\n\n### Multiple experiments at once\n\nBy default Split will avoid users participating in multiple experiments at once. 
This means you are less likely to skew results by adding in more variation to your tests.\n\nTo stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:\n\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = true\nend\n```\n\nThis will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.\n\nTo address this, setting the `allow_multiple_experiments` config option to 'control' like so:\n```ruby\nSplit.configure do |config|\n config.allow_multiple_experiments = 'control'\nend\n```\n\nFor this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test() will always return the first alternative without adding the user to that experiment.\n\n### Experiment Persistence\n\nSplit comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.\n\nBy default Split will store the tests for each user in the session.\n\nYou can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.\n\n#### Cookies\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\nend\n```\n\nWhen using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).\n\n```ruby\nSplit.configure do |config|\n config.persistence = :cookie\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\nThe data stored consists of the experiment name and the variants the user is in. Example: { \"experiment_name\" => \"variant_a\" }\n\n__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API\n\n#### Redis\n\nUsing Redis will allow ab_users to persist across sessions or machines.\n\n```ruby\nSplit.configure do |config|\n config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })\n # Equivalent\n # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)\nend\n```\n\nOptions:\n* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)\n* `namespace`: separate namespace to store these persisted values (default \"persistence\")\n* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)\n\n#### Dual Adapter\n\nThe Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.\n\n```ruby\ncookie_adapter = Split::Persistence::CookieAdapter\nredis_adapter = Split::Persistence::RedisAdapter.with_config(\n lookup_by: -> (context) { context.send(:current_user).try(:id) },\n expire_seconds: 2592000)\n\nSplit.configure do |config|\n config.persistence = Split::Persistence::DualAdapter.with_config(\n logged_in: -> (context) { !context.send(:current_user).try(:id).nil? 
},\n logged_in_adapter: redis_adapter,\n logged_out_adapter: cookie_adapter)\n config.persistence_cookie_length = 2592000 # 30 days\nend\n```\n\n#### Custom Adapter\n\nYour custom adapter needs to implement the same API as existing adapters.\nSee `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.\n\n```ruby\nSplit.configure do |config|\n config.persistence = YourCustomAdapterClass\nend\n```\n\n### Trial Event Hooks\n\nYou can define methods that will be called at the same time as experiment\nalternative participation and goal completion.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n config.on_trial = :log_trial # run on every trial\n config.on_trial_choose = :log_trial_choose # run on trials with new users only\n config.on_trial_complete = :log_trial_complete\nend\n```\n\nSet these attributes to a method name available in the same context as the\n`ab_test` method. These methods should accept one argument, a `Trial` instance.\n\n``` ruby\ndef log_trial(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_choose(trial)\n logger.info \"[new user] experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n\ndef log_trial_complete(trial)\n logger.info \"experiment=%s alternative=%s user=%s complete=true\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n#### Views\n\nIf you are running `ab_test` from a view, you must define your event\nhook callback as a\n[helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method)\nin the controller:\n\n``` ruby\nhelper_method :log_trial_choose\n\ndef log_trial_choose(trial)\n logger.info \"experiment=%s alternative=%s user=%s\" %\n [ trial.experiment.name, trial.alternative, current_user.id ]\nend\n```\n\n### Experiment Hooks\n\nYou can assign a proc that will be 
called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split.\n\nFor example:\n\n``` ruby\nSplit.configure do |config|\n # after experiment reset or deleted\n config.on_experiment_reset = -> (example) { # Do something on reset }\n config.on_experiment_delete = -> (experiment) { # Do something else on delete }\n # before experiment reset or deleted\n config.on_before_experiment_reset = -> (example) { # Do something on reset }\n config.on_before_experiment_delete = -> (experiment) { # Do something else on delete }\n # after experiment winner had been set\n config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose }\nend\n```\n\n## Web Interface\n\nSplit comes with a Sinatra-based front end to get an overview of how your experiments are doing.\n\nIf you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru`\n\n```ruby\nrequire 'split/dashboard'\n\nrun Rack::URLMap.new \\\n \"/\" => Your::App.new,\n \"/split\" => Split::Dashboard.new\n```\n\nHowever, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile:\n\n```ruby\ngem 'split', require: 'split/dashboard'\n```\n\nThen adding this to config/routes.rb\n\n```ruby\nmount Split::Dashboard, at: 'split'\n```\n\nYou may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file)\n\n```ruby\n# Rails apps or apps that already depend on activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n 
ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n\n# Apps without activesupport\nSplit::Dashboard.use Rack::Auth::Basic do |username, password|\n # Protect against timing attacks:\n # - Use & (do not use &&) so that it doesn't short circuit.\n # - Use digests to stop length information leaking\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_USERNAME\"])) &\n Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV[\"SPLIT_PASSWORD\"]))\nend\n```\n\nYou can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following:\n```ruby\nmatch \"/split\" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do\n request.env['warden'].authenticated? # are we authenticated?\n request.env['warden'].authenticate! # authenticate if not already\n # or even check any other condition such as request.env['warden'].user.is_admin?\nend\n```\n\nMore information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/)\n\n### Screenshot\n\n![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png)\n\n## Configuration\n\nYou can override the default configuration options of Split like so:\n\n```ruby\nSplit.configure do |config|\n config.db_failover = true # handle Redis errors gracefully\n config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) }\n config.allow_multiple_experiments = true\n config.enabled = true\n config.persistence = Split::Persistence::SessionAdapter\n #config.start_manually = false ## new test will have to be started manually from the admin panel. 
default false\n #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes\n config.include_rails_helper = true\n config.redis = \"redis://custom.redis.url:6380\"\nend\n```\n\nSplit looks for the Redis host in the environment variable `REDIS_URL` then\ndefaults to `redis://localhost:6379` if not specified by configure block.\n\nOn platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to\ndetermine which env variable key to use when retrieving the host config. This\ndefaults to `REDIS_URL`.\n\n### Filtering\n\nIn most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users.\nSplit provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic.\n\n```ruby\nSplit.configure do |config|\n # bot config\n config.robot_regex = /my_custom_robot_regex/ # or\n config.bots['newbot'] = \"Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion\"\n\n # IP config\n config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\\.19\\.48\\.[0-9]+/\n\n # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? }\n config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) }\nend\n```\n\n### Experiment configuration\n\nInstead of providing the experiment options inline, you can store them\nin a hash. 
This hash can control your experiment's alternatives, weights,\nalgorithm and if the experiment resets once finished:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n resettable: false\n },\n :my_second_experiment => {\n algorithm: 'Split::Algorithms::Whiplash',\n alternatives: [\n { name: \"a\", percent: 67 },\n { name: \"b\", percent: 33 }\n ]\n }\n }\nend\n```\n\nYou can also store your experiments in a YAML file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = YAML.load_file \"config/experiments.yml\"\nend\n```\n\nYou can then define the YAML file like:\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\nmy_second_experiment:\n alternatives:\n - name: a\n percent: 67\n - name: b\n percent: 33\n resettable: false\n```\n\nThis simplifies the calls from your code:\n\n```ruby\nab_test(:my_first_experiment)\n```\n\nand:\n\n```ruby\nab_finished(:my_first_experiment)\n```\n\nYou can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metadata: {\n \"a\" => {\"text\" => \"Have a fantastic day\"},\n \"b\" => {\"text\" => \"Don't get hit by a bus\"}\n }\n }\n }\nend\n```\n\n```yaml\nmy_first_experiment:\n alternatives:\n - a\n - b\n metadata:\n a:\n text: \"Have a fantastic day\"\n b:\n text: \"Don't get hit by a bus\"\n```\n\nThis allows for some advanced experiment configuration using methods like:\n\n```ruby\ntrial.alternative.name # => \"a\"\n\ntrial.metadata['text'] # => \"Have a fantastic day\"\n```\n\nor in views:\n\n```erb\n<% ab_test(\"my_first_experiment\") do |alternative, meta| %>\n <%= alternative %>\n <%= meta['text'] %>\n<% end %>\n```\n\nThe keys used in meta data should be Strings\n\n#### Metrics\n\nYou might wish to track generic metrics, such as conversions, 
and use\nthose to complete multiple different experiments without adding more to\nyour code. You can use the configuration hash to do this, thanks to\nthe `:metric` option.\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n my_first_experiment: {\n alternatives: [\"a\", \"b\"],\n metric: :my_metric\n }\n }\nend\n```\n\nYour code may then track a completion using the metric instead of\nthe experiment name:\n\n```ruby\nab_finished(:my_metric)\n```\n\nYou can also create a new metric by instantiating and saving a new Metric object.\n\n```ruby\nSplit::Metric.new(:my_metric)\nSplit::Metric.save\n```\n\n#### Goals\n\nYou might wish to allow an experiment to have multiple, distinguishable goals.\nThe API to define goals for an experiment is this:\n\n```ruby\nab_test({link_color: [\"purchase\", \"refund\"]}, \"red\", \"blue\")\n```\n\nor you can define them in a configuration file:\n\n```ruby\nSplit.configure do |config|\n config.experiments = {\n link_color: {\n alternatives: [\"red\", \"blue\"],\n goals: [\"purchase\", \"refund\"]\n }\n }\nend\n```\n\nTo complete a goal conversion, you do it like:\n\n```ruby\nab_finished(link_color: \"purchase\")\n```\n\nNote that if you pass additional options, that should be a separate hash:\n\n```ruby\nab_finished({ link_color: \"purchase\" }, reset: false)\n```\n\n**NOTE:** This does not mean that a single experiment can complete more than one goal.\n\nOnce you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.)\n\n**Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: \"plana_conversion\") or Plan B (goal: \"planb_conversion\").\n\n**Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK.\n\n**Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. 
THIS WILL NOT WORK.\n\n#### Combined Experiments\nIf you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments.\nConfigure like so:\n```ruby\n Split.configuration.experiments = {\n :button_color_experiment => {\n :alternatives => [\"blue\", \"green\"],\n :combined_experiments => [\"button_color_on_signup\", \"button_color_on_login\"]\n }\n }\n```\n\nStarting the combined test starts all combined experiments\n```ruby\n ab_combined_test(:button_color_experiment)\n```\nFinish each combined test as normal\n\n```ruby\n ab_finished(:button_color_on_login)\n ab_finished(:button_color_on_signup)\n```\n\n**Additional Configuration**:\n* Be sure to enable `allow_multiple_experiments`\n* In Sinatra include the CombinedExperimentsHelper\n ```\n helpers Split::CombinedExperimentsHelper\n ```\n### DB failover solution\n\nDue to the fact that Redis has no automatic failover mechanism, it's\npossible to switch on the `db_failover` config option, so that `ab_test`\nand `ab_finished` will not crash in case of a db failure. `ab_test` always\ndelivers alternative A (the first one) in that case.\n\nIt's also possible to set a `db_failover_on_db_error` callback (proc)\nfor example to log these errors via Rails.logger.\n\n### Redis\n\nYou may want to change the Redis host and port Split connects to, or\nset various other options at startup.\n\nSplit has a `redis` setter which can be given a string or a Redis\nobject. 
This means if you're already using Redis in your app, Split\ncan re-use the existing connection.\n\nString: `Split.redis = 'redis://localhost:6379'`\n\nRedis: `Split.redis = $redis`\n\nFor our rails app we have a `config/initializers/split.rb` file where\nwe load `config/split.yml` by hand and set the Redis information\nappropriately.\n\nHere's our `config/split.yml`:\n\n```yml\ndevelopment: redis://localhost:6379\ntest: redis://localhost:6379\nstaging: redis://redis1.example.com:6379\nfi: redis://localhost:6379\nproduction: redis://redis1.example.com:6379\n```\n\nAnd our initializer:\n\n```ruby\nsplit_config = YAML.load_file(Rails.root.join('config', 'split.yml'))\nSplit.redis = split_config[Rails.env]\n```\n\n### Redis Caching (v4.0+)\n\nIn some high-volume usage scenarios, Redis load can be incurred by repeated \nfetches for fairly static data. Enabling caching will reduce this load.\n\n ```ruby\nSplit.configuration.cache = true\n````\n\nThis currently caches:\n - `Split::ExperimentCatalog.find`\n - `Split::Experiment.start_time`\n - `Split::Experiment.winner`\n\n## Namespaces\n\nIf you're running multiple, separate instances of Split you may want\nto namespace the keyspaces so they do not overlap. This is not unlike\nthe approach taken by many memcached clients.\n\nThis feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace)\nlibrary. To configure Split to use `Redis::Namespace`, do the following:\n\n1. Add `redis-namespace` to your Gemfile:\n\n ```ruby\n gem 'redis-namespace'\n ```\n\n2. 
Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an\n initializer):\n\n ```ruby\n redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want\n Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)\n ```\n\n## Outside of a Web Session\n\nSplit provides the Helper module to facilitate running experiments inside web sessions.\n\nAlternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to\nconduct experiments that are not tied to a web session.\n\n```ruby\n# create a new experiment\nexperiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')\n# create a new trial\ntrial = Split::Trial.new(:experiment => experiment)\n# run trial\ntrial.choose!\n# get the result, returns either red or blue\ntrial.alternative.name\n\n# if the goal has been achieved, increment the successful completions for this alternative.\nif goal_achieved?\n trial.complete!\nend\n\n```\n\n## Algorithms\n\nBy default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test.\nIt is possible to specify static weights to favor certain alternatives.\n\n`Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132).\nThis algorithm will automatically weight the alternatives based on their relative performance,\nchoosing the better-performing ones more often as trials are completed.\n\n`Split::Algorithms::BlockRandomization` is an algorithm that ensures equal\nparticipation across all alternatives. This algorithm will choose the alternative\nwith the fewest participants. In the event of multiple minimum participant alternatives\n(i.e. starting a new \"Block\") the algorithm will choose a random alternative from\nthose minimum participant alternatives.\n\nUsers may also write their own algorithms. 
The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file.\n\nTo change the algorithm globally for all experiments, use the following in your initializer:\n\n```ruby\nSplit.configure do |config|\n config.algorithm = Split::Algorithms::Whiplash\nend\n```\n\n## Extensions\n\n - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split.\n - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics.\n - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis).\n - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test.\n - [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative.\n - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests.\n\n## Screencast\n\nRyan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split)\n\n## Blogposts\n\n* [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem)\n* [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html)\n\n## Backers\n\nSupport us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Sponsors\n\nBecome a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Contribute\n\nPlease do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors.\n\n### Development\n\nThe source code is hosted at [GitHub](https://github.com/splitrb/split).\n\nReport issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues).\n\nYou can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby).\n\n### Tests\n\nRun the tests like this:\n\n # Start a Redis server in another tab.\n redis-server\n\n bundle\n rake spec\n\n### A Note on Patches and Pull Requests\n\n * Fork the project.\n * Make your feature addition or bug fix.\n * Add tests for it. This is important so I don't break it in a\n future version unintentionally.\n * Add documentation if necessary.\n * Commit. Do not mess with the rakefile, version, or history.\n (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.)\n * Send a pull request. Bonus points for topic branches.\n\n### Code of Conduct\n\nPlease note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). 
By participating in this project you agree to abide by its terms.\n\n## Copyright\n\n[MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).\n\n Merge pull request #358 from andreibondarev/fix-readme-typo\n\nFix typo in README\n @@ -36,7 +36,7 @@ gem install split\n \n ### Rails\n \n-Adding `gem 'split'` to your Gemfile will autoloaded it when rails starts up, as long as you've configured redis it will 'just work'.\n+Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured redis it will 'just work'.\n \n ### Sinatra\n \n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #358 from andreibondarev/fix-readme-typo"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".md"},"lang":{"kind":"string","value":"md"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675195,"cells":{"id":{"kind":"string","value":"10070845"},"text":{"kind":"string","value":" safemarkup.py\n ADDFILE\n Catch restructuredtext exceptions, just show as text instead.\n\n @@ -0,0 +1,28 @@\n+from django import template\n+from django.conf import settings\n+from django.utils.encoding import smart_str, force_unicode\n+from django.utils.safestring import mark_safe\n+\n+register = template.Library()\n+\n+\n+def saferst(value):\n+ try:\n+ from docutils.core import publish_parts\n+ except ImportError:\n+ return force_unicode(value)\n+\n+ docutils_setttings = getattr(settings, \"RESTRUCTUREDTEXT_FILTER_SETTINGS\",\n+ dict())\n+ \n+ try:\n+ parts = publish_parts(source=smart_str(value),\n+ writer_name=\"html4css1\",\n+ settings_overrides=docutils_settings)\n+ except:\n+ return foce_unicode(value)\n+ else:\n+ return mark_safe(force_unicode(parts[\"fragment\"]))\n+saferst.is_safe = 
True\n+register.filter(saferst)\n+\n"},"addition_count":{"kind":"number","value":28,"string":"28"},"commit_subject":{"kind":"string","value":"Catch restructuredtext exceptions, just show as text instead."},"deletion_count":{"kind":"number","value":0,"string":"0"},"file_extension":{"kind":"string","value":".py"},"lang":{"kind":"string","value":"py"},"license":{"kind":"string","value":"bsd-3-clause"},"repo_name":{"kind":"string","value":"ask/chishop"}}},{"rowIdx":10675196,"cells":{"id":{"kind":"string","value":"10070846"},"text":{"kind":"string","value":" layout.erb\n \n\n\n\n\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\">\n\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\">\n\n\n\nSplit\n\n\n\n
\n

Split Dashboard

\n

<%= @current_env %>

\n
\n\n
\n <%= yield %>\n
\n\n
\n

Powered by Split v<%=Split::VERSION %>

\n
\n\n\n\n Merge pull request #584 from giraffate/fix_URLs_to_replace_http_with_https\n\nFix URLs to replace http with https\n @@ -21,7 +21,7 @@\n
\n \n
\n-

Powered by Split v<%=Split::VERSION %>

\n+

Powered by Split v<%=Split::VERSION %>

\n
\n \n \n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #584 from giraffate/fix_URLs_to_replace_http_with_https"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".erb"},"lang":{"kind":"string","value":"erb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675197,"cells":{"id":{"kind":"string","value":"10070847"},"text":{"kind":"string","value":" layout.erb\n \n\n\n\n\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\">\n\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\">\n\n\n\nSplit\n\n\n\n
\n

Split Dashboard

\n

<%= @current_env %>

\n
\n\n
\n <%= yield %>\n
\n\n
\n

Powered by Split v<%=Split::VERSION %>

\n
\n\n\n\n Merge pull request #584 from giraffate/fix_URLs_to_replace_http_with_https\n\nFix URLs to replace http with https\n @@ -21,7 +21,7 @@\n
\n \n
\n-

Powered by Split v<%=Split::VERSION %>

\n+

Powered by Split v<%=Split::VERSION %>

\n
\n \n \n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #584 from giraffate/fix_URLs_to_replace_http_with_https"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".erb"},"lang":{"kind":"string","value":"erb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675198,"cells":{"id":{"kind":"string","value":"10070848"},"text":{"kind":"string","value":" layout.erb\n \n\n\n\n\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\">\n\" media=\"screen\" rel=\"stylesheet\" type=\"text/css\">\n\n\n\nSplit\n\n\n\n
\n

Split Dashboard

\n

<%= @current_env %>

\n
\n\n
\n <%= yield %>\n
\n\n
\n

Powered by Split v<%=Split::VERSION %>

\n
\n\n\n\n Merge pull request #584 from giraffate/fix_URLs_to_replace_http_with_https\n\nFix URLs to replace http with https\n @@ -21,7 +21,7 @@\n
\n \n
\n-

Powered by Split v<%=Split::VERSION %>

\n+

Powered by Split v<%=Split::VERSION %>

\n
\n \n \n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Merge pull request #584 from giraffate/fix_URLs_to_replace_http_with_https"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".erb"},"lang":{"kind":"string","value":"erb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}},{"rowIdx":10675199,"cells":{"id":{"kind":"string","value":"10070849"},"text":{"kind":"string","value":" redis_adapter.rb\n # frozen_string_literal: true\n\nmodule Split\n module Persistence\n class RedisAdapter\n DEFAULT_CONFIG = { namespace: \"persistence\" }.freeze\n\n attr_reader :redis_key\n\n def initialize(context, key = nil)\n if key\n @redis_key = \"#{self.class.config[:namespace]}:#{key}\"\n elsif lookup_by = self.class.config[:lookup_by]\n if lookup_by.respond_to?(:call)\n key_frag = lookup_by.call(context)\n else\n key_frag = context.send(lookup_by)\n end\n @redis_key = \"#{self.class.config[:namespace]}:#{key_frag}\"\n else\n raise \"Please configure lookup_by\"\n end\n end\n\n def [](field)\n Split.redis.hget(redis_key, field)\n end\n\n def []=(field, value)\n Split.redis.hset(redis_key, field, value)\n expire_seconds = self.class.config[:expire_seconds]\n Split.redis.expire(redis_key, expire_seconds) if expire_seconds\n end\n\n def delete(field)\n Split.redis.hdel(redis_key, field)\n end\n\n def keys\n Split.redis.hkeys(redis_key)\n end\n\n def self.find(user_id)\n new(nil, user_id)\n end\n\n def self.with_config(options={})\n self.config.merge!(options)\n self\n end\n\n def self.config\n @config ||= DEFAULT_CONFIG.dup\n end\n\n def self.reset_config!\n @config = DEFAULT_CONFIG.dup\n end\n end\n end\nend\n\n Fix Layout/SpaceAroundEqualsInParameterDefault\n\n @@ -44,7 +44,7 @@ module Split\n new(nil, user_id)\n end\n \n- def self.with_config(options={})\n+ def self.with_config(options = {})\n self.config.merge!(options)\n 
self\n end\n"},"addition_count":{"kind":"number","value":1,"string":"1"},"commit_subject":{"kind":"string","value":"Fix Layout/SpaceAroundEqualsInParameterDefault"},"deletion_count":{"kind":"number","value":1,"string":"1"},"file_extension":{"kind":"string","value":".rb"},"lang":{"kind":"string","value":"rb"},"license":{"kind":"string","value":"mit"},"repo_name":{"kind":"string","value":"splitrb/split"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":106751,"numItemsPerPage":100,"numTotalItems":10676919,"offset":10675100,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjczNzY2MSwic3ViIjoiL2RhdGFzZXRzL0NhcnBlckFJL2dpdGh1Yi1kaWZmcy1kZWR1cGVkIiwiZXhwIjoxNzU2NzQxMjYxLCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.6e8OJQF4U6VJgz5wk4hROBIdO73f-kzBtXZ9Uk31mUvdWtlTIZMZT6Trn6V9mqtpiXGDYhMuXfYJp2xDNPcjDA","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
Search is not available for this dataset
id
stringlengths
1
8
text
stringlengths
72
9.81M
addition_count
int64
0
10k
commit_subject
stringlengths
0
3.7k
deletion_count
int64
0
8.43k
file_extension
stringlengths
0
32
lang
stringlengths
1
94
license
stringclasses
10 values
repo_name
stringlengths
9
59
10070750
<NME> user_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "split/experiment_catalog" require "split/experiment" require "split/user" describe Split::User do let(:user_keys) { { "link_color" => "blue" } } let(:context) { double(session: { split: user_keys }) } let(:experiment) { Split::Experiment.new("link_color") } before(:each) do @subject = described_class.new(context) end it "delegates methods correctly" do expect(@subject["link_color"]).to eq(@subject.user["link_color"]) end context "#cleanup_old_versions!" do let(:experiment_version) { "#{experiment.name}:1" } let(:second_experiment_version) { "#{experiment.name}_another:1" } let(:third_experiment_version) { "variation_of_#{experiment.name}:1" } let(:user_keys) do { experiment_version => "blue", second_experiment_version => "red", third_experiment_version => "yellow" } end before(:each) { @subject.cleanup_old_versions!(experiment) } it "removes key if old experiment is found" do expect(@subject.keys).not_to include(experiment_version) end it "does not remove other keys" do expect(@subject.keys).to include(second_experiment_version, third_experiment_version) end end context "#cleanup_old_experiments!" do it "removes key if experiment is not found" do @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(true) @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has not started yet" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! 
expect(@subject.keys).to be_empty end context "with finished key" do let(:user_keys) { { "link_color" => "blue", "link_color:finished" => true } } it "does not remove finished key for experiment without a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(Split::ExperimentCatalog).to receive(:find).with("link_color:finished").and_return(nil) allow(experiment).to receive(:start_time).and_return(Date.today) end end context "instantiated with custom adapter" do let(:custom_adapter) { double(:persistence_adapter) } ab_user = Split::User.find(112233, :redis) expect(ab_user["foo"]).to eql("bar") end it "returns nil if adapter does not implement a finder method" do ab_user = Split::User.find(112233, :dual_adapter) expect(ab_user).to be_nil end end context "instantiated with custom adapter" do let(:custom_adapter) { double(:persistence_adapter) } before do @subject = described_class.new(context, custom_adapter) end it "sets user to the custom adapter" do expect(@subject.user).to eq(custom_adapter) end end end <MSG> add a simple way to load users outside web session <DFF> @@ -72,6 +72,23 @@ describe Split::User do end end + context 'allows user to be loaded from adapter' do + it 'loads user from adapter (RedisAdapter)' do + user = Split::Persistence::RedisAdapter.new(nil, 112233) + user['foo'] = 'bar' + + ab_user = Split::User.find(112233, :redis) + + expect(ab_user['foo']).to eql('bar') + end + + it 'returns nil if adapter does not implement a finder method' do + ab_user = Split::User.find(112233, :dual_adapter) + expect(ab_user).to be_nil + end + + end + context "instantiated with custom adapter" do let(:custom_adapter) { double(:persistence_adapter) }
17
add a simple way to load users outside web session
0
.rb
rb
mit
splitrb/split
10070751
<NME> version.rb <BEF> # frozen_string_literal: true module Split MAJOR = 2 MINOR = 0 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end <MSG> v2.1.0 <DFF> @@ -1,7 +1,7 @@ # frozen_string_literal: true module Split MAJOR = 2 - MINOR = 0 + MINOR = 1 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end
1
v2.1.0
1
.rb
rb
mit
splitrb/split
10070752
<NME> version.rb <BEF> # frozen_string_literal: true module Split MAJOR = 2 MINOR = 0 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end <MSG> v2.1.0 <DFF> @@ -1,7 +1,7 @@ # frozen_string_literal: true module Split MAJOR = 2 - MINOR = 0 + MINOR = 1 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end
1
v2.1.0
1
.rb
rb
mit
splitrb/split
10070753
<NME> version.rb <BEF> # frozen_string_literal: true module Split MAJOR = 2 MINOR = 0 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end <MSG> v2.1.0 <DFF> @@ -1,7 +1,7 @@ # frozen_string_literal: true module Split MAJOR = 2 - MINOR = 0 + MINOR = 1 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end
1
v2.1.0
1
.rb
rb
mit
splitrb/split
10070754
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split currently requires Ruby 1.9.3 or higher. If your project requires compatibility with Ruby 1.8.x and Rails 2.3, please use v0.8.0. Split uses Redis as a datastore. Split only supports Redis 2.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. 
#### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? 
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, set the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). 
```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? 
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ``` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! 
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan Bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Update Ruby and Rails requirements on README <DFF> @@ -19,11 +19,13 @@ Split is designed to be hacker friendly, allowing for maximum customisation and ### Requirements -Split currently requires Ruby 1.9.3 or higher. If your project requires compatibility with Ruby 1.8.x and Rails 2.3, please use v0.8.0. +Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.0. + +If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. -Split only supports Redis 2.0 or greater. +Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis:
4
Update Ruby and Rails requirements on README
2
.md
md
mit
splitrb/split
10070755
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split currently requires Ruby 1.9.3 or higher. If your project requires compatibility with Ruby 1.8.x and Rails 2.3, please use v0.8.0. Split uses Redis as a datastore. Split only supports Redis 2.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. 
#### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil?
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, set the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).
```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? 
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ``` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose!
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Update Ruby and Rails requirements on README <DFF> @@ -19,11 +19,13 @@ Split is designed to be hacker friendly, allowing for maximum customisation and ### Requirements -Split currently requires Ruby 1.9.3 or higher. If your project requires compatibility with Ruby 1.8.x and Rails 2.3, please use v0.8.0. +Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.0. + +If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. -Split only supports Redis 2.0 or greater. +Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis:
4
Update Ruby and Rails requirements on README
2
.md
md
mit
splitrb/split
10070756
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split currently requires Ruby 1.9.3 or higher. If your project requires compatibility with Ruby 1.8.x and Rails 2.3, please use v0.8.0. Split uses Redis as a datastore. Split only supports Redis 2.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. 
#### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? 
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start a new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, set the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). 
```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? 
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ``` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! 
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan Bates has produced an excellent 10-minute screencast about Split on the RailsCasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project; you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Update Ruby and Rails requirements on README <DFF> @@ -19,11 +19,13 @@ Split is designed to be hacker friendly, allowing for maximum customisation and ### Requirements -Split currently requires Ruby 1.9.3 or higher. If your project requires compatibility with Ruby 1.8.x and Rails 2.3, please use v0.8.0. +Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.0. + +If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. -Split only supports Redis 2.0 or greater. +Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis:
4
Update Ruby and Rails requirements on README
2
.md
md
mit
splitrb/split
10070757
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end @params = {'link_color' => 'blue'} alternative = ab_test('link_color', 'blue', 'red') alternative.should eql('blue') end it "should allow passing a block" do new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an 
experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to 
eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") 
expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", 
"control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" 
expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = 
ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # 
receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = 
ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) 
ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq 
"ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" 
do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = 
ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = 
ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do 
|config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call 
db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) 
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to 
raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Fix bug: overriding alternatives doesn't work for weighted alternatives <DFF> @@ -50,6 +50,13 @@ describe Split::Helper do @params = {'link_color' => 'blue'} alternative = ab_test('link_color', 'blue', 'red') alternative.should eql('blue') + alternative = ab_test('link_color', 'blue' => 1, 'red' => 5) + alternative.should eql('blue') + @params = 
{'link_color' => 'red'} + alternative = ab_test('link_color', 'blue', 'red') + alternative.should eql('red') + alternative = ab_test('link_color', 'blue' => 5, 'red' => 1) + alternative.should eql('red') end it "should allow passing a block" do
7
Fix bug: overriding alternatives doesn't work for weighted alternatives
0
.rb
rb
mit
splitrb/split
10070758
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end @params = {'link_color' => 'blue'} alternative = ab_test('link_color', 'blue', 'red') alternative.should eql('blue') end it "should allow passing a block" do new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an 
experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to 
eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") 
expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", 
"control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" 
expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = 
ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # 
receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = 
ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) 
ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq 
"ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" 
do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = 
ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = 
ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do 
|config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call 
db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) 
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to 
raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Fix bug: overriding alternatives doesn't work for weighted alternatives <DFF> @@ -50,6 +50,13 @@ describe Split::Helper do @params = {'link_color' => 'blue'} alternative = ab_test('link_color', 'blue', 'red') alternative.should eql('blue') + alternative = ab_test('link_color', 'blue' => 1, 'red' => 5) + alternative.should eql('blue') + @params = 
{'link_color' => 'red'} + alternative = ab_test('link_color', 'blue', 'red') + alternative.should eql('red') + alternative = ab_test('link_color', 'blue' => 5, 'red' => 1) + alternative.should eql('red') end it "should allow passing a block" do
7
Fix bug: overriding alternatives doesn't work for weighted alternatives
0
.rb
rb
mit
splitrb/split
10070759
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end @params = {'link_color' => 'blue'} alternative = ab_test('link_color', 'blue', 'red') alternative.should eql('blue') end it "should allow passing a block" do new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an 
experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to 
eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") 
expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", 
"control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" 
expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = 
ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # 
receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = 
ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) 
ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq 
"ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" 
do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = 
ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = 
ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do 
|config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call 
db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) 
expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to 
raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Fix bug: overriding alternatives doesn't work for weighted alternatives <DFF> @@ -50,6 +50,13 @@ describe Split::Helper do @params = {'link_color' => 'blue'} alternative = ab_test('link_color', 'blue', 'red') alternative.should eql('blue') + alternative = ab_test('link_color', 'blue' => 1, 'red' => 5) + alternative.should eql('blue') + @params = 
{'link_color' => 'red'} + alternative = ab_test('link_color', 'blue', 'red') + alternative.should eql('red') + alternative = ab_test('link_color', 'blue' => 5, 'red' => 1) + alternative.should eql('red') end it "should allow passing a block" do
7
Fix bug: overriding alternatives doesn't work for weighted alternatives
0
.rb
rb
mit
splitrb/split
10070760
<NME> _experiment.erb <BEF> <% unless goal.nil? %> <% experiment_class = "experiment experiment_with_goal" %> <% else %> <% experiment_class = "experiment" %> <% end %> <% experiment.calc_winning_alternatives %> <% extra_columns = [] experiment.alternatives.each do |alternative| extra_info = alternative.extra_info || {} extra_columns += extra_info.keys end extra_columns.uniq! <th>Non-finished</th> <th>Completed</th> <th>Conversion Rate</th> <th>Z-Score</th> <th>Winner</th> </tr> <% total_participants = total_completed = 0 %> end %> <div class="<%= experiment_class %>" data-name="<%= experiment.name %>" data-complete="<%= experiment.has_winner? %>"> <div class="experiment-header"> <h2> Experiment: <%= experiment.name %> <% if experiment.version > 1 %><span class='version'>v<%= experiment.version %></span><% end %> <% unless goal.nil? %><span class='goal'>Goal:<%= goal %></span><% end %> <% metrics = @metrics.select {|metric| metric.experiments.include? experiment} %> <% unless metrics.empty? %> <span class='goal'>Metrics:<%= metrics.map(&:name).join(', ') %></span> <% end %> </h2> <% if goal.nil? %> <div class='inline-controls'> <small><%= experiment.start_time ? 
experiment.start_time.strftime('%Y-%m-%d') : 'Unknown' %></small> <%= erb :_controls, :locals => {:experiment => experiment} %> </div> <% end %> <% end %> <% end %> </td> <td><%= round(alternative.z_score, 3) %></td> <td> <% if experiment.winner %> <% if experiment.winner.name == alternative.name %> <th><%= column %></th> <% end %> <th> <form> <select id="dropdown-<%=experiment.jstring(goal)%>" name="dropdown-<%=experiment.jstring(goal)%>"> <option value="confidence-<%=experiment.jstring(goal)%>">Confidence</option> <option value="probability-<%=experiment.jstring(goal)%>">Probability of being Winner</option> </select> </form> </th> <th>Finish</th> </tr> <% total_participants = total_completed = total_unfinished = 0 %> <% experiment.alternatives.each do |alternative| %> <tr> <td> <%= alternative.name %> <% if alternative.control? %> <em>control</em> <% end %> <form action="<%= url('force_alternative') + '?experiment=' + experiment.name %>" method='post'> <input type='hidden' name='alternative' value='<%= h alternative.name %>'> <input type="submit" value="Force for current user" class="green"> </form> </td> <td><%= alternative.participant_count %></td> <td><%= alternative.unfinished_count %></td> <td><%= alternative.completed_count(goal) %></td> <td> <%= number_to_percentage(alternative.conversion_rate(goal)) %>% <% if experiment.control.conversion_rate(goal) > 0 && !alternative.control? 
%> <% if alternative.conversion_rate(goal) > experiment.control.conversion_rate(goal) %> <span class='better'> +<%= number_to_percentage((alternative.conversion_rate(goal)/experiment.control.conversion_rate(goal))-1) %>% </span> <% elsif alternative.conversion_rate(goal) < experiment.control.conversion_rate(goal) %> <span class='worse'> <%= number_to_percentage((alternative.conversion_rate(goal)/experiment.control.conversion_rate(goal))-1) %>% </span> <% end %> <% end %> </td> <script type="text/javascript" id="sourcecode"> $(document).ready(function(){ $('.probability-<%=experiment.jstring(goal)%>').hide(); $('#dropdown-<%=experiment.jstring(goal)%>').change(function() { $('.box-<%=experiment.jstring(goal)%>').hide(); $('.' + $(this).val()).show(); }); }); </script> <% extra_columns.each do |column| %> <td><%= alternative.extra_info && alternative.extra_info[column] %></td> <% end %> <td> <div class="box-<%=experiment.jstring(goal)%> confidence-<%=experiment.jstring(goal)%>"> <span title='z-score: <%= round(alternative.z_score(goal), 3) %>'><%= confidence_level(alternative.z_score(goal)) %></span> <br> </div> <div class="box-<%=experiment.jstring(goal)%> probability-<%=experiment.jstring(goal)%>"> <span title="p_winner: <%= round(alternative.p_winner(goal), 3) %>"><%= number_to_percentage(round(alternative.p_winner(goal), 3)) %>%</span> </div> </td> <td> <% if experiment.has_winner? 
%> <% if experiment.winner.name == alternative.name %> Winner <% else %> Loser <% end %> <% else %> <form action="<%= url('experiment') + '?experiment=' + experiment.name %>" method='post' onclick="return confirmWinner()"> <input type='hidden' name='alternative' value='<%= h alternative.name %>'> <input type="submit" value="Use this" class="green"> </form> <% end %> </td> </tr> <% total_participants += alternative.participant_count %> <% total_unfinished += alternative.unfinished_count %> <% total_completed += alternative.completed_count(goal) %> <% end %> <tr class="totals"> <td>Totals</td> <td><%= total_participants %></td> <td><%= total_unfinished %></td> <td><%= total_completed %></td> <td>N/A</td> <% extra_columns.each do |column| %> <td> <%= summary_texts[column] %> </td> <% end %> <td>N/A</td> <td>N/A</td> </tr> </table> </div> <MSG> Show confidence level instead of zscore in dashboard <DFF> @@ -16,8 +16,8 @@ <th>Non-finished</th> <th>Completed</th> <th>Conversion Rate</th> - <th>Z-Score</th> - <th>Winner</th> + <th>Confidence</th> + <th>Finish</th> </tr> <% total_participants = total_completed = 0 %> @@ -46,7 +46,9 @@ <% end %> <% end %> </td> - <td><%= round(alternative.z_score, 3) %></td> + <td> + <span title='z-score: <%= round(alternative.z_score, 3) %>'><%= confidence_level(alternative.z_score) %></span> + </td> <td> <% if experiment.winner %> <% if experiment.winner.name == alternative.name %>
5
Show confidence level instead of zscore in dashboard
3
.erb
erb
mit
splitrb/split
10070761
<NME> _experiment.erb <BEF> <% unless goal.nil? %> <% experiment_class = "experiment experiment_with_goal" %> <% else %> <% experiment_class = "experiment" %> <% end %> <% experiment.calc_winning_alternatives %> <% extra_columns = [] experiment.alternatives.each do |alternative| extra_info = alternative.extra_info || {} extra_columns += extra_info.keys end extra_columns.uniq! <th>Non-finished</th> <th>Completed</th> <th>Conversion Rate</th> <th>Z-Score</th> <th>Winner</th> </tr> <% total_participants = total_completed = 0 %> end %> <div class="<%= experiment_class %>" data-name="<%= experiment.name %>" data-complete="<%= experiment.has_winner? %>"> <div class="experiment-header"> <h2> Experiment: <%= experiment.name %> <% if experiment.version > 1 %><span class='version'>v<%= experiment.version %></span><% end %> <% unless goal.nil? %><span class='goal'>Goal:<%= goal %></span><% end %> <% metrics = @metrics.select {|metric| metric.experiments.include? experiment} %> <% unless metrics.empty? %> <span class='goal'>Metrics:<%= metrics.map(&:name).join(', ') %></span> <% end %> </h2> <% if goal.nil? %> <div class='inline-controls'> <small><%= experiment.start_time ? 
experiment.start_time.strftime('%Y-%m-%d') : 'Unknown' %></small> <%= erb :_controls, :locals => {:experiment => experiment} %> </div> <% end %> <% end %> <% end %> </td> <td><%= round(alternative.z_score, 3) %></td> <td> <% if experiment.winner %> <% if experiment.winner.name == alternative.name %> <th><%= column %></th> <% end %> <th> <form> <select id="dropdown-<%=experiment.jstring(goal)%>" name="dropdown-<%=experiment.jstring(goal)%>"> <option value="confidence-<%=experiment.jstring(goal)%>">Confidence</option> <option value="probability-<%=experiment.jstring(goal)%>">Probability of being Winner</option> </select> </form> </th> <th>Finish</th> </tr> <% total_participants = total_completed = total_unfinished = 0 %> <% experiment.alternatives.each do |alternative| %> <tr> <td> <%= alternative.name %> <% if alternative.control? %> <em>control</em> <% end %> <form action="<%= url('force_alternative') + '?experiment=' + experiment.name %>" method='post'> <input type='hidden' name='alternative' value='<%= h alternative.name %>'> <input type="submit" value="Force for current user" class="green"> </form> </td> <td><%= alternative.participant_count %></td> <td><%= alternative.unfinished_count %></td> <td><%= alternative.completed_count(goal) %></td> <td> <%= number_to_percentage(alternative.conversion_rate(goal)) %>% <% if experiment.control.conversion_rate(goal) > 0 && !alternative.control? 
%> <% if alternative.conversion_rate(goal) > experiment.control.conversion_rate(goal) %> <span class='better'> +<%= number_to_percentage((alternative.conversion_rate(goal)/experiment.control.conversion_rate(goal))-1) %>% </span> <% elsif alternative.conversion_rate(goal) < experiment.control.conversion_rate(goal) %> <span class='worse'> <%= number_to_percentage((alternative.conversion_rate(goal)/experiment.control.conversion_rate(goal))-1) %>% </span> <% end %> <% end %> </td> <script type="text/javascript" id="sourcecode"> $(document).ready(function(){ $('.probability-<%=experiment.jstring(goal)%>').hide(); $('#dropdown-<%=experiment.jstring(goal)%>').change(function() { $('.box-<%=experiment.jstring(goal)%>').hide(); $('.' + $(this).val()).show(); }); }); </script> <% extra_columns.each do |column| %> <td><%= alternative.extra_info && alternative.extra_info[column] %></td> <% end %> <td> <div class="box-<%=experiment.jstring(goal)%> confidence-<%=experiment.jstring(goal)%>"> <span title='z-score: <%= round(alternative.z_score(goal), 3) %>'><%= confidence_level(alternative.z_score(goal)) %></span> <br> </div> <div class="box-<%=experiment.jstring(goal)%> probability-<%=experiment.jstring(goal)%>"> <span title="p_winner: <%= round(alternative.p_winner(goal), 3) %>"><%= number_to_percentage(round(alternative.p_winner(goal), 3)) %>%</span> </div> </td> <td> <% if experiment.has_winner? 
%> <% if experiment.winner.name == alternative.name %> Winner <% else %> Loser <% end %> <% else %> <form action="<%= url('experiment') + '?experiment=' + experiment.name %>" method='post' onclick="return confirmWinner()"> <input type='hidden' name='alternative' value='<%= h alternative.name %>'> <input type="submit" value="Use this" class="green"> </form> <% end %> </td> </tr> <% total_participants += alternative.participant_count %> <% total_unfinished += alternative.unfinished_count %> <% total_completed += alternative.completed_count(goal) %> <% end %> <tr class="totals"> <td>Totals</td> <td><%= total_participants %></td> <td><%= total_unfinished %></td> <td><%= total_completed %></td> <td>N/A</td> <% extra_columns.each do |column| %> <td> <%= summary_texts[column] %> </td> <% end %> <td>N/A</td> <td>N/A</td> </tr> </table> </div> <MSG> Show confidence level instead of zscore in dashboard <DFF> @@ -16,8 +16,8 @@ <th>Non-finished</th> <th>Completed</th> <th>Conversion Rate</th> - <th>Z-Score</th> - <th>Winner</th> + <th>Confidence</th> + <th>Finish</th> </tr> <% total_participants = total_completed = 0 %> @@ -46,7 +46,9 @@ <% end %> <% end %> </td> - <td><%= round(alternative.z_score, 3) %></td> + <td> + <span title='z-score: <%= round(alternative.z_score, 3) %>'><%= confidence_level(alternative.z_score) %></span> + </td> <td> <% if experiment.winner %> <% if experiment.winner.name == alternative.name %>
5
Show confidence level instead of zscore in dashboard
3
.erb
erb
mit
splitrb/split
10070762
<NME> _experiment.erb <BEF> <% unless goal.nil? %> <% experiment_class = "experiment experiment_with_goal" %> <% else %> <% experiment_class = "experiment" %> <% end %> <% experiment.calc_winning_alternatives %> <% extra_columns = [] experiment.alternatives.each do |alternative| extra_info = alternative.extra_info || {} extra_columns += extra_info.keys end extra_columns.uniq! <th>Non-finished</th> <th>Completed</th> <th>Conversion Rate</th> <th>Z-Score</th> <th>Winner</th> </tr> <% total_participants = total_completed = 0 %> end %> <div class="<%= experiment_class %>" data-name="<%= experiment.name %>" data-complete="<%= experiment.has_winner? %>"> <div class="experiment-header"> <h2> Experiment: <%= experiment.name %> <% if experiment.version > 1 %><span class='version'>v<%= experiment.version %></span><% end %> <% unless goal.nil? %><span class='goal'>Goal:<%= goal %></span><% end %> <% metrics = @metrics.select {|metric| metric.experiments.include? experiment} %> <% unless metrics.empty? %> <span class='goal'>Metrics:<%= metrics.map(&:name).join(', ') %></span> <% end %> </h2> <% if goal.nil? %> <div class='inline-controls'> <small><%= experiment.start_time ? 
experiment.start_time.strftime('%Y-%m-%d') : 'Unknown' %></small> <%= erb :_controls, :locals => {:experiment => experiment} %> </div> <% end %> <% end %> <% end %> </td> <td><%= round(alternative.z_score, 3) %></td> <td> <% if experiment.winner %> <% if experiment.winner.name == alternative.name %> <th><%= column %></th> <% end %> <th> <form> <select id="dropdown-<%=experiment.jstring(goal)%>" name="dropdown-<%=experiment.jstring(goal)%>"> <option value="confidence-<%=experiment.jstring(goal)%>">Confidence</option> <option value="probability-<%=experiment.jstring(goal)%>">Probability of being Winner</option> </select> </form> </th> <th>Finish</th> </tr> <% total_participants = total_completed = total_unfinished = 0 %> <% experiment.alternatives.each do |alternative| %> <tr> <td> <%= alternative.name %> <% if alternative.control? %> <em>control</em> <% end %> <form action="<%= url('force_alternative') + '?experiment=' + experiment.name %>" method='post'> <input type='hidden' name='alternative' value='<%= h alternative.name %>'> <input type="submit" value="Force for current user" class="green"> </form> </td> <td><%= alternative.participant_count %></td> <td><%= alternative.unfinished_count %></td> <td><%= alternative.completed_count(goal) %></td> <td> <%= number_to_percentage(alternative.conversion_rate(goal)) %>% <% if experiment.control.conversion_rate(goal) > 0 && !alternative.control? 
%> <% if alternative.conversion_rate(goal) > experiment.control.conversion_rate(goal) %> <span class='better'> +<%= number_to_percentage((alternative.conversion_rate(goal)/experiment.control.conversion_rate(goal))-1) %>% </span> <% elsif alternative.conversion_rate(goal) < experiment.control.conversion_rate(goal) %> <span class='worse'> <%= number_to_percentage((alternative.conversion_rate(goal)/experiment.control.conversion_rate(goal))-1) %>% </span> <% end %> <% end %> </td> <script type="text/javascript" id="sourcecode"> $(document).ready(function(){ $('.probability-<%=experiment.jstring(goal)%>').hide(); $('#dropdown-<%=experiment.jstring(goal)%>').change(function() { $('.box-<%=experiment.jstring(goal)%>').hide(); $('.' + $(this).val()).show(); }); }); </script> <% extra_columns.each do |column| %> <td><%= alternative.extra_info && alternative.extra_info[column] %></td> <% end %> <td> <div class="box-<%=experiment.jstring(goal)%> confidence-<%=experiment.jstring(goal)%>"> <span title='z-score: <%= round(alternative.z_score(goal), 3) %>'><%= confidence_level(alternative.z_score(goal)) %></span> <br> </div> <div class="box-<%=experiment.jstring(goal)%> probability-<%=experiment.jstring(goal)%>"> <span title="p_winner: <%= round(alternative.p_winner(goal), 3) %>"><%= number_to_percentage(round(alternative.p_winner(goal), 3)) %>%</span> </div> </td> <td> <% if experiment.has_winner? 
%> <% if experiment.winner.name == alternative.name %> Winner <% else %> Loser <% end %> <% else %> <form action="<%= url('experiment') + '?experiment=' + experiment.name %>" method='post' onclick="return confirmWinner()"> <input type='hidden' name='alternative' value='<%= h alternative.name %>'> <input type="submit" value="Use this" class="green"> </form> <% end %> </td> </tr> <% total_participants += alternative.participant_count %> <% total_unfinished += alternative.unfinished_count %> <% total_completed += alternative.completed_count(goal) %> <% end %> <tr class="totals"> <td>Totals</td> <td><%= total_participants %></td> <td><%= total_unfinished %></td> <td><%= total_completed %></td> <td>N/A</td> <% extra_columns.each do |column| %> <td> <%= summary_texts[column] %> </td> <% end %> <td>N/A</td> <td>N/A</td> </tr> </table> </div> <MSG> Show confidence level instead of zscore in dashboard <DFF> @@ -16,8 +16,8 @@ <th>Non-finished</th> <th>Completed</th> <th>Conversion Rate</th> - <th>Z-Score</th> - <th>Winner</th> + <th>Confidence</th> + <th>Finish</th> </tr> <% total_participants = total_completed = 0 %> @@ -46,7 +46,9 @@ <% end %> <% end %> </td> - <td><%= round(alternative.z_score, 3) %></td> + <td> + <span title='z-score: <%= round(alternative.z_score, 3) %>'><%= confidence_level(alternative.z_score) %></span> + </td> <td> <% if experiment.winner %> <% if experiment.winner.name == alternative.name %>
5
Show confidence level instead of zscore in dashboard
3
.erb
erb
mit
splitrb/split
10070763
<NME> AUTHORS <BEF> Ask Solem <[email protected]> Rune Halvorsen <[email protected]> Russel Sim <[email protected]> Brian Rosner <[email protected]> Hugo Lopes Tavares <[email protected]> Sverre Johansen <[email protected]> Bo Shi <[email protected]> Carl Meyer <[email protected]> Vinícius das Chagas Silva <[email protected]> Vanderson Mota dos Santos <[email protected]> Stefan Foulis <[email protected]> Michael Richardson <[email protected]> Halldór Rúnarsson <[email protected]> Brent Tubbs <[email protected]> David Cramer <[email protected]> <MSG> Whoops, don't even know my own email address... <DFF> @@ -1,3 +1,3 @@ Ask Solem <[email protected]> Rune Halvorsen <[email protected]> -Russel Sim <[email protected]> +Russell Sim <[email protected]>
1
Whoops, don't even know my own email address...
1
AUTHORS
bsd-3-clause
ask/chishop
10070764
<NME> version.rb <BEF> # frozen_string_literal: true module Split MAJOR = 2 MINOR = 1 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end <MSG> v2.2.0 <DFF> @@ -1,7 +1,7 @@ # frozen_string_literal: true module Split MAJOR = 2 - MINOR = 1 + MINOR = 2 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end
1
v2.2.0
1
.rb
rb
mit
splitrb/split
10070765
<NME> version.rb <BEF> # frozen_string_literal: true module Split MAJOR = 2 MINOR = 1 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end <MSG> v2.2.0 <DFF> @@ -1,7 +1,7 @@ # frozen_string_literal: true module Split MAJOR = 2 - MINOR = 1 + MINOR = 2 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end
1
v2.2.0
1
.rb
rb
mit
splitrb/split
10070766
<NME> version.rb <BEF> # frozen_string_literal: true module Split MAJOR = 2 MINOR = 1 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end <MSG> v2.2.0 <DFF> @@ -1,7 +1,7 @@ # frozen_string_literal: true module Split MAJOR = 2 - MINOR = 1 + MINOR = 2 PATCH = 0 VERSION = [MAJOR, MINOR, PATCH].join('.') end
1
v2.2.0
1
.rb
rb
mit
splitrb/split
10070767
<NME> 7.0.gemfile <BEF> source "https://rubygems.org" gem "rubocop", require: false gem "codeclimate-test-reporter" gem "appraisal" gem "codeclimate-test-reporter" gem "rails", "~> 7.0" gemspec path: "../" <MSG> Merge pull request #675 from splitrb/enable-rb-31 Add Ruby 3.1 <DFF> @@ -5,5 +5,6 @@ source "https://rubygems.org" gem "appraisal" gem "codeclimate-test-reporter" gem "rails", "~> 7.0" +gem "matrix" gemspec path: "../"
1
Merge pull request #675 from splitrb/enable-rb-31
0
.gemfile
0
mit
splitrb/split
10070768
<NME> 7.0.gemfile <BEF> source "https://rubygems.org" gem "rubocop", require: false gem "codeclimate-test-reporter" gem "appraisal" gem "codeclimate-test-reporter" gem "rails", "~> 7.0" gemspec path: "../" <MSG> Merge pull request #675 from splitrb/enable-rb-31 Add Ruby 3.1 <DFF> @@ -5,5 +5,6 @@ source "https://rubygems.org" gem "appraisal" gem "codeclimate-test-reporter" gem "rails", "~> 7.0" +gem "matrix" gemspec path: "../"
1
Merge pull request #675 from splitrb/enable-rb-31
0
.gemfile
0
mit
splitrb/split
10070769
<NME> 7.0.gemfile <BEF> source "https://rubygems.org" gem "rubocop", require: false gem "codeclimate-test-reporter" gem "appraisal" gem "codeclimate-test-reporter" gem "rails", "~> 7.0" gemspec path: "../" <MSG> Merge pull request #675 from splitrb/enable-rb-31 Add Ruby 3.1 <DFF> @@ -5,5 +5,6 @@ source "https://rubygems.org" gem "appraisal" gem "codeclimate-test-reporter" gem "rails", "~> 7.0" +gem "matrix" gemspec path: "../"
1
Merge pull request #675 from splitrb/enable-rb-31
0
.gemfile
0
mit
splitrb/split
10070770
<NME> urls.py <BEF> # -*- coding: utf-8 -*- from django.conf.urls.defaults import patterns, url, include, handler404, handler500 from django.conf import settings from django.contrib import admin admin.autodiscover() urlpatterns = patterns('') # Serve static pages. if settings.LOCAL_DEVELOPMENT: urlpatterns += patterns("django.views", url(r"%s(?P<path>.*)$" % settings.MEDIA_URL[1:], "static.serve", { "document_root": settings.MEDIA_ROOT})) urlpatterns += patterns("", # Admin interface url(r'^admin/doc/', include("django.contrib.admindocs.urls")), url(r'^admin/(.*)', admin.site.root), # Registration url(r'^accounts/', include('registration.backends.default.urls')), # The Chishop url(r'', include("djangopypi.urls")) ) <MSG> clean up MEDIA_URL: fixes #8, fixes #9 <DFF> @@ -10,7 +10,7 @@ urlpatterns = patterns('') # Serve static pages. if settings.LOCAL_DEVELOPMENT: urlpatterns += patterns("django.views", - url(r"%s(?P<path>.*)$" % settings.MEDIA_URL[1:], "static.serve", { + url(r"^%s(?P<path>.*)$" % settings.MEDIA_URL[1:], "static.serve", { "document_root": settings.MEDIA_ROOT})) urlpatterns += patterns("",
1
clean up MEDIA_URL: fixes #8, fixes #9
1
.py
py
bsd-3-clause
ask/chishop
10070771
<NME> version.rb <BEF> module Split MAJOR = 0 MINOR = 6 PATCH = 4 VERSION = [MAJOR, MINOR, PATCH].join('.') end <MSG> v0.6.5 <DFF> @@ -1,6 +1,6 @@ module Split MAJOR = 0 MINOR = 6 - PATCH = 4 + PATCH = 5 VERSION = [MAJOR, MINOR, PATCH].join('.') end
1
v0.6.5
1
.rb
rb
mit
splitrb/split
10070772
<NME> version.rb <BEF> module Split MAJOR = 0 MINOR = 6 PATCH = 4 VERSION = [MAJOR, MINOR, PATCH].join('.') end <MSG> v0.6.5 <DFF> @@ -1,6 +1,6 @@ module Split MAJOR = 0 MINOR = 6 - PATCH = 4 + PATCH = 5 VERSION = [MAJOR, MINOR, PATCH].join('.') end
1
v0.6.5
1
.rb
rb
mit
splitrb/split
10070773
<NME> version.rb <BEF> module Split MAJOR = 0 MINOR = 6 PATCH = 4 VERSION = [MAJOR, MINOR, PATCH].join('.') end <MSG> v0.6.5 <DFF> @@ -1,6 +1,6 @@ module Split MAJOR = 0 MINOR = 6 - PATCH = 4 + PATCH = 5 VERSION = [MAJOR, MINOR, PATCH].join('.') end
1
v0.6.5
1
.rb
rb
mit
splitrb/split
10070774
<NME> layout.erb <BEF> <!DOCTYPE html> <html> <head> <meta content='text/html; charset=utf-8' http-equiv='Content-Type'> <link href="<%= url 'reset.css' %>" media="screen" rel="stylesheet" type="text/css"> <link href="<%= url 'style.css' %>" media="screen" rel="stylesheet" type="text/css"> <script type="text/javascript" src='<%= url 'dashboard.js' %>'></script> <script type="text/javascript" src='<%= url 'jquery-1.11.1.min.js' %>'></script> <script type="text/javascript" src='<%= url 'dashboard-filtering.js' %>'></script> <title>Split</title> <body> <div class="header"> <h1>Split Dashboard</h1> </div> <div id="main"> <div id="main"> <%= yield %> </div> <div id="footer"> <p>Powered by <a href="https://github.com/splitrb/split">Split</a> v<%=Split::VERSION %></p> </div> </body> </html> <MSG> Merge branch 'dashboard-environment' of github.com:rceee/split into rceee-dashboard-environment * 'dashboard-environment' of github.com:rceee/split: Made Rails ENV conditional on Rails environment existing using controller Conflicts: lib/split/dashboard/views/layout.erb <DFF> @@ -11,6 +11,7 @@ <body> <div class="header"> <h1>Split Dashboard</h1> + <p class="environment"><%= @current_env %></p> </div> <div id="main">
1
Merge branch 'dashboard-environment' of github.com:rceee/split into rceee-dashboard-environment
0
.erb
erb
mit
splitrb/split
10070775
<NME> layout.erb <BEF> <!DOCTYPE html> <html> <head> <meta content='text/html; charset=utf-8' http-equiv='Content-Type'> <link href="<%= url 'reset.css' %>" media="screen" rel="stylesheet" type="text/css"> <link href="<%= url 'style.css' %>" media="screen" rel="stylesheet" type="text/css"> <script type="text/javascript" src='<%= url 'dashboard.js' %>'></script> <script type="text/javascript" src='<%= url 'jquery-1.11.1.min.js' %>'></script> <script type="text/javascript" src='<%= url 'dashboard-filtering.js' %>'></script> <title>Split</title> <body> <div class="header"> <h1>Split Dashboard</h1> </div> <div id="main"> <div id="main"> <%= yield %> </div> <div id="footer"> <p>Powered by <a href="https://github.com/splitrb/split">Split</a> v<%=Split::VERSION %></p> </div> </body> </html> <MSG> Merge branch 'dashboard-environment' of github.com:rceee/split into rceee-dashboard-environment * 'dashboard-environment' of github.com:rceee/split: Made Rails ENV conditional on Rails environment existing using controller Conflicts: lib/split/dashboard/views/layout.erb <DFF> @@ -11,6 +11,7 @@ <body> <div class="header"> <h1>Split Dashboard</h1> + <p class="environment"><%= @current_env %></p> </div> <div id="main">
1
Merge branch 'dashboard-environment' of github.com:rceee/split into rceee-dashboard-environment
0
.erb
erb
mit
splitrb/split
10070776
<NME> layout.erb <BEF> <!DOCTYPE html> <html> <head> <meta content='text/html; charset=utf-8' http-equiv='Content-Type'> <link href="<%= url 'reset.css' %>" media="screen" rel="stylesheet" type="text/css"> <link href="<%= url 'style.css' %>" media="screen" rel="stylesheet" type="text/css"> <script type="text/javascript" src='<%= url 'dashboard.js' %>'></script> <script type="text/javascript" src='<%= url 'jquery-1.11.1.min.js' %>'></script> <script type="text/javascript" src='<%= url 'dashboard-filtering.js' %>'></script> <title>Split</title> <body> <div class="header"> <h1>Split Dashboard</h1> </div> <div id="main"> <div id="main"> <%= yield %> </div> <div id="footer"> <p>Powered by <a href="https://github.com/splitrb/split">Split</a> v<%=Split::VERSION %></p> </div> </body> </html> <MSG> Merge branch 'dashboard-environment' of github.com:rceee/split into rceee-dashboard-environment * 'dashboard-environment' of github.com:rceee/split: Made Rails ENV conditional on Rails environment existing using controller Conflicts: lib/split/dashboard/views/layout.erb <DFF> @@ -11,6 +11,7 @@ <body> <div class="header"> <h1>Split Dashboard</h1> + <p class="environment"><%= @current_env %></p> </div> <div id="main">
1
Merge branch 'dashboard-environment' of github.com:rceee/split into rceee-dashboard-environment
0
.erb
erb
mit
splitrb/split
10070777
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. 
#### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? 
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). 
```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? 
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` ## Development Source hosted at [GitHub](http://github.com/splitrb/split). Report Issues/Feature requests on [GitHub Issues](http://github.com/splitrb/split/issues). Discussion at [Google Groups](https://groups.google.com/d/forum/split-ruby) Tests can be run with `rake spec` ### Note on Patches/Pull Requests ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ``` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! 
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Clarify test running instructions <DFF> @@ -700,11 +700,19 @@ Over 70 different people have contributed to the project, you can see them all h ## Development +Run the tests like this: + + # Start a Redis server in another tab. + redis-server + + bundle + rake spec + Source hosted at [GitHub](http://github.com/splitrb/split). + Report Issues/Feature requests on [GitHub Issues](http://github.com/splitrb/split/issues). -Discussion at [Google Groups](https://groups.google.com/d/forum/split-ruby) -Tests can be ran with `rake spec` +Discussion at [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Note on Patches/Pull Requests
10
Clarify test running instructions
2
.md
md
mit
splitrb/split
10070778
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. 
#### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? 
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start a new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in the Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, set the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). 
```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? 
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` ## Development Source hosted at [GitHub](http://github.com/splitrb/split). Report Issues/Feature requests on [GitHub Issues](http://github.com/splitrb/split/issues). Discussion at [Google Groups](https://groups.google.com/d/forum/split-ruby) Tests can be run with `rake spec` ### Note on Patches/Pull Requests ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ``` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! 
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Clarify test running instructions <DFF> @@ -700,11 +700,19 @@ Over 70 different people have contributed to the project, you can see them all h ## Development +Run the tests like this: + + # Start a Redis server in another tab. + redis-server + + bundle + rake spec + Source hosted at [GitHub](http://github.com/splitrb/split). + Report Issues/Feature requests on [GitHub Issues](http://github.com/splitrb/split/issues). -Discussion at [Google Groups](https://groups.google.com/d/forum/split-ruby) -Tests can be ran with `rake spec` +Discussion at [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Note on Patches/Pull Requests
10
Clarify test running instructions
2
.md
md
mit
splitrb/split
10070779
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3). Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. 
#### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? 
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, set the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). 
```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? 
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` ## Development Source hosted at [GitHub](http://github.com/splitrb/split). Report Issues/Feature requests on [GitHub Issues](http://github.com/splitrb/split/issues). Discussion at [Google Groups](https://groups.google.com/d/forum/split-ruby) Tests can be run with `rake spec` ### Note on Patches/Pull Requests ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ``` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! 
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Clarify test running instructions <DFF> @@ -700,11 +700,19 @@ Over 70 different people have contributed to the project, you can see them all h ## Development +Run the tests like this: + + # Start a Redis server in another tab. + redis-server + + bundle + rake spec + Source hosted at [GitHub](http://github.com/splitrb/split). + Report Issues/Feature requests on [GitHub Issues](http://github.com/splitrb/split/issues). -Discussion at [Google Groups](https://groups.google.com/d/forum/split-ruby) -Tests can be ran with `rake spec` +Discussion at [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Note on Patches/Pull Requests
10
Clarify test running instructions
2
.md
md
mit
splitrb/split
10070780
<NME> split.gemspec <BEF> # -*- encoding: utf-8 -*- # frozen_string_literal: true $:.push File.expand_path("../lib", __FILE__) require "split/version" s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] s.email = ["[email protected]"] s.homepage = "https://github.com/andrew/split" s.summary = %q{Rack based split testing framework} s.homepage = "https://github.com/splitrb/split" s.summary = "Rack based split testing framework" s.metadata = { "homepage_uri" => "https://github.com/splitrb/split", "changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md", "source_code_uri" => "https://github.com/splitrb/split", "bug_tracker_uri" => "https://github.com/splitrb/split/issues", "wiki_uri" => "https://github.com/splitrb/split/wiki", "mailing_list_uri" => "https://groups.google.com/d/forum/split-ruby" } s.required_ruby_version = ">= 2.5.0" s.required_rubygems_version = ">= 2.0.0" s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.require_paths = ["lib"] s.add_dependency "redis", ">= 4.2" s.add_dependency "sinatra", ">= 1.2.6" s.add_dependency "rubystats", ">= 0.3.0" s.add_development_dependency "bundler", ">= 1.17" s.add_development_dependency "simplecov", "~> 0.15" s.add_development_dependency "rack-test", "~> 2.0" s.add_development_dependency "rake", "~> 13" s.add_development_dependency "rspec", "~> 3.7" s.add_development_dependency "pry", "~> 0.10" s.add_development_dependency "rails", ">= 5.0" end <MSG> Added License to gemspec <DFF> @@ -7,6 +7,7 @@ Gem::Specification.new do |s| s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] + s.licenses = ['MIT'] s.email = ["[email protected]"] s.homepage = "https://github.com/andrew/split" s.summary = %q{Rack based split testing framework}
1
Added License to gemspec
0
.gemspec
gemspec
mit
splitrb/split
10070781
<NME> split.gemspec <BEF> # -*- encoding: utf-8 -*- # frozen_string_literal: true $:.push File.expand_path("../lib", __FILE__) require "split/version" s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] s.email = ["[email protected]"] s.homepage = "https://github.com/andrew/split" s.summary = %q{Rack based split testing framework} s.homepage = "https://github.com/splitrb/split" s.summary = "Rack based split testing framework" s.metadata = { "homepage_uri" => "https://github.com/splitrb/split", "changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md", "source_code_uri" => "https://github.com/splitrb/split", "bug_tracker_uri" => "https://github.com/splitrb/split/issues", "wiki_uri" => "https://github.com/splitrb/split/wiki", "mailing_list_uri" => "https://groups.google.com/d/forum/split-ruby" } s.required_ruby_version = ">= 2.5.0" s.required_rubygems_version = ">= 2.0.0" s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.require_paths = ["lib"] s.add_dependency "redis", ">= 4.2" s.add_dependency "sinatra", ">= 1.2.6" s.add_dependency "rubystats", ">= 0.3.0" s.add_development_dependency "bundler", ">= 1.17" s.add_development_dependency "simplecov", "~> 0.15" s.add_development_dependency "rack-test", "~> 2.0" s.add_development_dependency "rake", "~> 13" s.add_development_dependency "rspec", "~> 3.7" s.add_development_dependency "pry", "~> 0.10" s.add_development_dependency "rails", ">= 5.0" end <MSG> Added License to gemspec <DFF> @@ -7,6 +7,7 @@ Gem::Specification.new do |s| s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] + s.licenses = ['MIT'] s.email = ["[email protected]"] s.homepage = "https://github.com/andrew/split" s.summary = %q{Rack based split testing framework}
1
Added License to gemspec
0
.gemspec
gemspec
mit
splitrb/split
10070782
<NME> split.gemspec <BEF> # -*- encoding: utf-8 -*- # frozen_string_literal: true $:.push File.expand_path("../lib", __FILE__) require "split/version" s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] s.email = ["[email protected]"] s.homepage = "https://github.com/andrew/split" s.summary = %q{Rack based split testing framework} s.homepage = "https://github.com/splitrb/split" s.summary = "Rack based split testing framework" s.metadata = { "homepage_uri" => "https://github.com/splitrb/split", "changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md", "source_code_uri" => "https://github.com/splitrb/split", "bug_tracker_uri" => "https://github.com/splitrb/split/issues", "wiki_uri" => "https://github.com/splitrb/split/wiki", "mailing_list_uri" => "https://groups.google.com/d/forum/split-ruby" } s.required_ruby_version = ">= 2.5.0" s.required_rubygems_version = ">= 2.0.0" s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.require_paths = ["lib"] s.add_dependency "redis", ">= 4.2" s.add_dependency "sinatra", ">= 1.2.6" s.add_dependency "rubystats", ">= 0.3.0" s.add_development_dependency "bundler", ">= 1.17" s.add_development_dependency "simplecov", "~> 0.15" s.add_development_dependency "rack-test", "~> 2.0" s.add_development_dependency "rake", "~> 13" s.add_development_dependency "rspec", "~> 3.7" s.add_development_dependency "pry", "~> 0.10" s.add_development_dependency "rails", ">= 5.0" end <MSG> Added License to gemspec <DFF> @@ -7,6 +7,7 @@ Gem::Specification.new do |s| s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] + s.licenses = ['MIT'] s.email = ["[email protected]"] s.homepage = "https://github.com/andrew/split" s.summary = %q{Rack based split testing framework}
1
Added License to gemspec
0
.gemspec
gemspec
mit
splitrb/split
10070783
<NME> settings.py <BEF> from conf.default import * import os DEBUG = True TEMPLATE_DEBUG = DEBUG LOCAL_DEVELOPMENT = True if LOCAL_DEVELOPMENT: import sys sys.path.append(os.path.dirname(__file__)) # if you're sloppy. DJANGOPYPI_ALLOW_VERSION_OVERWRITE = False DJANGOPYPI_RELEASE_UPLOAD_TO = 'dists' LOCAL_DEVELOPMENT=True # change to False if you do not want Django's default server to serve static pages LOCAL_DEVELOPMENT = True DATABASE_ENGINE = 'sqlite3' DATABASE_NAME = os.path.join(here, 'devdatabase.db') DATABASE_USER = '' DATABASE_PASSWORD = '' DATABASE_HOST = '' DATABASE_PORT = '' <MSG> syncing with ask repos <DFF> @@ -12,7 +12,6 @@ ADMINS = ( # if you're sloppy. DJANGOPYPI_ALLOW_VERSION_OVERWRITE = False DJANGOPYPI_RELEASE_UPLOAD_TO = 'dists' -LOCAL_DEVELOPMENT=True # change to False if you do not want Django's default server to serve static pages LOCAL_DEVELOPMENT = True
0
syncing with ask repos
1
.py
py
bsd-3-clause
ask/chishop
10070784
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. 
#### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? 
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). 
```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin?
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ``` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose!
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew).
<MSG> Avoid variable_size_secure_compare private method (#465) <DFF> @@ -439,9 +439,9 @@ You may want to password protect that page, you can do so with `Rack::Auth::Basi Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. - # - Use `variable_size_secure_compare` to stop length information leaking - ActiveSupport::SecurityUtils.variable_size_secure_compare(username, ENV["SPLIT_USERNAME"]) & - ActiveSupport::SecurityUtils.variable_size_secure_compare(password, ENV["SPLIT_PASSWORD"]) + # - Use digests to stop length information leaking + ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & + ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport
3
Avoid variable_size_secure_compare private method (#465)
3
.md
md
mit
splitrb/split
10070785
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. 
#### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? 
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start a new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything.
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, set the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).
```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin?
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ``` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose!
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Avoid variable_size_secure_compare private method (#465) <DFF> @@ -439,9 +439,9 @@ You may want to password protect that page, you can do so with `Rack::Auth::Basi Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. - # - Use `variable_size_secure_compare` to stop length information leaking - ActiveSupport::SecurityUtils.variable_size_secure_compare(username, ENV["SPLIT_USERNAME"]) & - ActiveSupport::SecurityUtils.variable_size_secure_compare(password, ENV["SPLIT_PASSWORD"]) + # - Use digests to stop length information leaking + ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & + ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport
3
Avoid variable_size_secure_compare private method (#465)
3
.md
md
mit
splitrb/split
10070786
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. 
#### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? 
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). 
```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? 
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ```` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! 
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Avoid variable_size_secure_compare private method (#465) <DFF> @@ -439,9 +439,9 @@ You may want to password protect that page, you can do so with `Rack::Auth::Basi Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. - # - Use `variable_size_secure_compare` to stop length information leaking - ActiveSupport::SecurityUtils.variable_size_secure_compare(username, ENV["SPLIT_USERNAME"]) & - ActiveSupport::SecurityUtils.variable_size_secure_compare(password, ENV["SPLIT_PASSWORD"]) + # - Use digests to stop length information leaking + ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & + ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport
3
Avoid variable_size_secure_compare private method (#465)
3
.md
md
mit
splitrb/split
10070787
<NME> configuration.rb <BEF> # frozen_string_literal: true module Split class Configuration attr_accessor :ignore_ip_addresses attr_accessor :ignore_filter attr_accessor :db_failover attr_accessor :db_failover_on_db_error attr_accessor :db_failover_allow_parameter_override attr_accessor :allow_multiple_experiments attr_accessor :enabled attr_accessor :persistence attr_accessor :persistence_cookie_length attr_accessor :persistence_cookie_domain attr_accessor :algorithm attr_accessor :store_override attr_accessor :start_manually attr_accessor :reset_manually attr_accessor :on_trial attr_accessor :on_trial_choose attr_accessor :on_trial_complete attr_accessor :on_experiment_reset attr_accessor :on_experiment_delete attr_accessor :on_before_experiment_reset attr_accessor :on_experiment_winner_choose attr_accessor :on_before_experiment_delete attr_accessor :include_rails_helper attr_accessor :beta_probability_simulations attr_accessor :winning_alternative_recalculation_interval attr_accessor :redis attr_accessor :dashboard_pagination_default_per_page attr_accessor :cache attr_reader :experiments attr_writer :bots attr_writer :robot_regex def bots @bots ||= { # Indexers "AdsBot-Google" => "Google Adwords", "Baidu" => "Chinese search engine", "Baiduspider" => "Chinese search engine", "bingbot" => "Microsoft bing bot", "Butterfly" => "Topsy Labs", "Gigabot" => "Gigabot spider", "Googlebot" => "Google spider", "MJ12bot" => "Majestic-12 spider", "msnbot" => "Microsoft bot", "rogerbot" => "SeoMoz spider", "PaperLiBot" => "PaperLi is another content curation service", "Slurp" => "Yahoo spider", "Sogou" => "Chinese search engine", "spider" => "generic web spider", "UnwindFetchor" => "Gnip crawler", "WordPress" => "WordPress spider", "YandexAccessibilityBot" => "Yandex accessibility spider", "YandexBot" => "Yandex spider", "YandexMobileBot" => "Yandex mobile spider", "ZIBB" => "ZIBB spider", # HTTP libraries "Apache-HttpClient" => "Java http library", "AppEngine-Google" => 
"Google App Engine", "curl" => "curl unix CLI http client", "ColdFusion" => "ColdFusion http library", "EventMachine HttpClient" => "Ruby http library", "Go http package" => "Go http library", "Go-http-client" => "Go http library", "Java" => "Generic Java http library", "libwww-perl" => "Perl client-server library loved by script kids", "lwp-trivial" => "Another Perl library loved by script kids", "Python-urllib" => "Python http library", "PycURL" => "Python http library", "Test Certificate Info" => "C http library?", "Typhoeus" => "Ruby http library", "Wget" => "wget unix CLI http client", # URL expanders / previewers "awe.sm" => "Awe.sm URL expander", "bitlybot" => "bit.ly bot", "[email protected]" => "Linkfluence bot", "facebookexternalhit" => "facebook bot", "Facebot" => "Facebook crawler", "Feedfetcher-Google" => "Google Feedfetcher", "https://developers.google.com/+/web/snippet" => "Google+ Snippet Fetcher", "LinkedInBot" => "LinkedIn bot", "LongURL" => "URL expander service", "NING" => "NING - Yet Another Twitter Swarmer", "Pinterestbot" => "Pinterest Bot", "redditbot" => "Reddit Bot", "ShortLinkTranslate" => "Link shortener", "Slackbot" => "Slackbot link expander", "TweetmemeBot" => "TweetMeMe Crawler", "Twitterbot" => "Twitter URL expander", "UnwindFetch" => "Gnip URL expander", "vkShare" => "VKontake Sharer", # Uptime monitoring "check_http" => "Nagios monitor", "GoogleStackdriverMonitoring" => "Google Cloud monitor", "NewRelicPinger" => "NewRelic monitor", "Panopta" => "Monitoring service", "Pingdom" => "Pingdom monitoring", "SiteUptime" => "Site monitoring services", "UptimeRobot" => "Monitoring service", # ??? "DigitalPersona Fingerprint Software" => "HP Fingerprint scanner", self.experiments.each do |key, value| metrics = value_for(value, :metric) rescue nil Array(metrics).each do |metric_name| if metric_name.to_sym @metrics[metric_name] ||= [] @metrics[metric_name] << Split::Experiment.new(key) end end end end def disabled? 
!enabled end def experiment_for(name) if normalized_experiments # TODO symbols normalized_experiments[name.to_sym] end end def metrics return @metrics if defined?(@metrics) @metrics = {} if self.experiments self.experiments.each do |key, value| metrics = value_for(value, :metric) rescue nil Array(metrics).each do |metric_name| if metric_name @metrics[metric_name.to_sym] ||= [] @metrics[metric_name.to_sym] << Split::Experiment.new(key) end end end end @metrics end def normalized_experiments return nil if @experiments.nil? experiment_config = {} @experiments.keys.each do |name| experiment_config[name.to_sym] = {} end @experiments.each do |experiment_name, settings| alternatives = if (alts = value_for(settings, :alternatives)) normalize_alternatives(alts) end experiment_data = { alternatives: alternatives, goals: value_for(settings, :goals), metadata: value_for(settings, :metadata), algorithm: value_for(settings, :algorithm), resettable: value_for(settings, :resettable) } experiment_data.each do |name, value| experiment_config[experiment_name.to_sym][name] = value if value != nil end end experiment_config end def normalize_alternatives(alternatives) given_probability, num_with_probability = alternatives.inject([0, 0]) do |a, v| p, n = a if percent = value_for(v, :percent) [p + percent, n + 1] else a end end num_without_probability = alternatives.length - num_with_probability unassigned_probability = ((100.0 - given_probability) / num_without_probability / 100.0) if num_with_probability.nonzero? 
alternatives = alternatives.map do |v| if (name = value_for(v, :name)) && (percent = value_for(v, :percent)) { name => percent / 100.0 } elsif name = value_for(v, :name) { name => unassigned_probability } else { v => unassigned_probability } end end [alternatives.shift, alternatives] else alternatives = alternatives.dup [alternatives.shift, alternatives] end end def robot_regex @robot_regex ||= /\b(?:#{escaped_bots.join('|')})\b|\A\W*\z/i end def initialize @ignore_ip_addresses = [] @ignore_filter = proc { |request| is_robot? || is_ignored_ip_address? } @db_failover = false @db_failover_on_db_error = proc { |error| } # e.g. use Rails logger here @on_experiment_reset = proc { |experiment| } @on_experiment_delete = proc { |experiment| } @on_before_experiment_reset = proc { |experiment| } @on_before_experiment_delete = proc { |experiment| } @on_experiment_winner_choose = proc { |experiment| } @db_failover_allow_parameter_override = false @allow_multiple_experiments = false @enabled = true @experiments = {} @persistence = Split::Persistence::SessionAdapter @persistence_cookie_length = 31536000 # One year from now @persistence_cookie_domain = nil @algorithm = Split::Algorithms::WeightedSample @include_rails_helper = true @beta_probability_simulations = 10000 @winning_alternative_recalculation_interval = 60 * 60 * 24 # 1 day @redis = ENV.fetch(ENV.fetch("REDIS_PROVIDER", "REDIS_URL"), "redis://localhost:6379") @dashboard_pagination_default_per_page = 10 end private def value_for(hash, key) if hash.kind_of?(Hash) hash.has_key?(key.to_s) ? 
hash[key.to_s] : hash[key.to_sym] end end def escaped_bots bots.map { |key, _| Regexp.escape(key) } end end end <MSG> fix to pass spec <DFF> @@ -111,9 +111,9 @@ module Split self.experiments.each do |key, value| metrics = value_for(value, :metric) rescue nil Array(metrics).each do |metric_name| - if metric_name.to_sym - @metrics[metric_name] ||= [] - @metrics[metric_name] << Split::Experiment.new(key) + if metric_name + @metrics[metric_name.to_sym] ||= [] + @metrics[metric_name.to_sym] << Split::Experiment.new(key) end end end
3
fix to pass spec
3
.rb
rb
mit
splitrb/split
10070788
<NME> configuration.rb <BEF> # frozen_string_literal: true module Split class Configuration attr_accessor :ignore_ip_addresses attr_accessor :ignore_filter attr_accessor :db_failover attr_accessor :db_failover_on_db_error attr_accessor :db_failover_allow_parameter_override attr_accessor :allow_multiple_experiments attr_accessor :enabled attr_accessor :persistence attr_accessor :persistence_cookie_length attr_accessor :persistence_cookie_domain attr_accessor :algorithm attr_accessor :store_override attr_accessor :start_manually attr_accessor :reset_manually attr_accessor :on_trial attr_accessor :on_trial_choose attr_accessor :on_trial_complete attr_accessor :on_experiment_reset attr_accessor :on_experiment_delete attr_accessor :on_before_experiment_reset attr_accessor :on_experiment_winner_choose attr_accessor :on_before_experiment_delete attr_accessor :include_rails_helper attr_accessor :beta_probability_simulations attr_accessor :winning_alternative_recalculation_interval attr_accessor :redis attr_accessor :dashboard_pagination_default_per_page attr_accessor :cache attr_reader :experiments attr_writer :bots attr_writer :robot_regex def bots @bots ||= { # Indexers "AdsBot-Google" => "Google Adwords", "Baidu" => "Chinese search engine", "Baiduspider" => "Chinese search engine", "bingbot" => "Microsoft bing bot", "Butterfly" => "Topsy Labs", "Gigabot" => "Gigabot spider", "Googlebot" => "Google spider", "MJ12bot" => "Majestic-12 spider", "msnbot" => "Microsoft bot", "rogerbot" => "SeoMoz spider", "PaperLiBot" => "PaperLi is another content curation service", "Slurp" => "Yahoo spider", "Sogou" => "Chinese search engine", "spider" => "generic web spider", "UnwindFetchor" => "Gnip crawler", "WordPress" => "WordPress spider", "YandexAccessibilityBot" => "Yandex accessibility spider", "YandexBot" => "Yandex spider", "YandexMobileBot" => "Yandex mobile spider", "ZIBB" => "ZIBB spider", # HTTP libraries "Apache-HttpClient" => "Java http library", "AppEngine-Google" => 
"Google App Engine", "curl" => "curl unix CLI http client", "ColdFusion" => "ColdFusion http library", "EventMachine HttpClient" => "Ruby http library", "Go http package" => "Go http library", "Go-http-client" => "Go http library", "Java" => "Generic Java http library", "libwww-perl" => "Perl client-server library loved by script kids", "lwp-trivial" => "Another Perl library loved by script kids", "Python-urllib" => "Python http library", "PycURL" => "Python http library", "Test Certificate Info" => "C http library?", "Typhoeus" => "Ruby http library", "Wget" => "wget unix CLI http client", # URL expanders / previewers "awe.sm" => "Awe.sm URL expander", "bitlybot" => "bit.ly bot", "[email protected]" => "Linkfluence bot", "facebookexternalhit" => "facebook bot", "Facebot" => "Facebook crawler", "Feedfetcher-Google" => "Google Feedfetcher", "https://developers.google.com/+/web/snippet" => "Google+ Snippet Fetcher", "LinkedInBot" => "LinkedIn bot", "LongURL" => "URL expander service", "NING" => "NING - Yet Another Twitter Swarmer", "Pinterestbot" => "Pinterest Bot", "redditbot" => "Reddit Bot", "ShortLinkTranslate" => "Link shortener", "Slackbot" => "Slackbot link expander", "TweetmemeBot" => "TweetMeMe Crawler", "Twitterbot" => "Twitter URL expander", "UnwindFetch" => "Gnip URL expander", "vkShare" => "VKontake Sharer", # Uptime monitoring "check_http" => "Nagios monitor", "GoogleStackdriverMonitoring" => "Google Cloud monitor", "NewRelicPinger" => "NewRelic monitor", "Panopta" => "Monitoring service", "Pingdom" => "Pingdom monitoring", "SiteUptime" => "Site monitoring services", "UptimeRobot" => "Monitoring service", # ??? "DigitalPersona Fingerprint Software" => "HP Fingerprint scanner", self.experiments.each do |key, value| metrics = value_for(value, :metric) rescue nil Array(metrics).each do |metric_name| if metric_name.to_sym @metrics[metric_name] ||= [] @metrics[metric_name] << Split::Experiment.new(key) end end end end def disabled? 
!enabled end def experiment_for(name) if normalized_experiments # TODO symbols normalized_experiments[name.to_sym] end end def metrics return @metrics if defined?(@metrics) @metrics = {} if self.experiments self.experiments.each do |key, value| metrics = value_for(value, :metric) rescue nil Array(metrics).each do |metric_name| if metric_name @metrics[metric_name.to_sym] ||= [] @metrics[metric_name.to_sym] << Split::Experiment.new(key) end end end end @metrics end def normalized_experiments return nil if @experiments.nil? experiment_config = {} @experiments.keys.each do |name| experiment_config[name.to_sym] = {} end @experiments.each do |experiment_name, settings| alternatives = if (alts = value_for(settings, :alternatives)) normalize_alternatives(alts) end experiment_data = { alternatives: alternatives, goals: value_for(settings, :goals), metadata: value_for(settings, :metadata), algorithm: value_for(settings, :algorithm), resettable: value_for(settings, :resettable) } experiment_data.each do |name, value| experiment_config[experiment_name.to_sym][name] = value if value != nil end end experiment_config end def normalize_alternatives(alternatives) given_probability, num_with_probability = alternatives.inject([0, 0]) do |a, v| p, n = a if percent = value_for(v, :percent) [p + percent, n + 1] else a end end num_without_probability = alternatives.length - num_with_probability unassigned_probability = ((100.0 - given_probability) / num_without_probability / 100.0) if num_with_probability.nonzero? 
alternatives = alternatives.map do |v| if (name = value_for(v, :name)) && (percent = value_for(v, :percent)) { name => percent / 100.0 } elsif name = value_for(v, :name) { name => unassigned_probability } else { v => unassigned_probability } end end [alternatives.shift, alternatives] else alternatives = alternatives.dup [alternatives.shift, alternatives] end end def robot_regex @robot_regex ||= /\b(?:#{escaped_bots.join('|')})\b|\A\W*\z/i end def initialize @ignore_ip_addresses = [] @ignore_filter = proc { |request| is_robot? || is_ignored_ip_address? } @db_failover = false @db_failover_on_db_error = proc { |error| } # e.g. use Rails logger here @on_experiment_reset = proc { |experiment| } @on_experiment_delete = proc { |experiment| } @on_before_experiment_reset = proc { |experiment| } @on_before_experiment_delete = proc { |experiment| } @on_experiment_winner_choose = proc { |experiment| } @db_failover_allow_parameter_override = false @allow_multiple_experiments = false @enabled = true @experiments = {} @persistence = Split::Persistence::SessionAdapter @persistence_cookie_length = 31536000 # One year from now @persistence_cookie_domain = nil @algorithm = Split::Algorithms::WeightedSample @include_rails_helper = true @beta_probability_simulations = 10000 @winning_alternative_recalculation_interval = 60 * 60 * 24 # 1 day @redis = ENV.fetch(ENV.fetch("REDIS_PROVIDER", "REDIS_URL"), "redis://localhost:6379") @dashboard_pagination_default_per_page = 10 end private def value_for(hash, key) if hash.kind_of?(Hash) hash.has_key?(key.to_s) ? 
hash[key.to_s] : hash[key.to_sym] end end def escaped_bots bots.map { |key, _| Regexp.escape(key) } end end end <MSG> fix to pass spec <DFF> @@ -111,9 +111,9 @@ module Split self.experiments.each do |key, value| metrics = value_for(value, :metric) rescue nil Array(metrics).each do |metric_name| - if metric_name.to_sym - @metrics[metric_name] ||= [] - @metrics[metric_name] << Split::Experiment.new(key) + if metric_name + @metrics[metric_name.to_sym] ||= [] + @metrics[metric_name.to_sym] << Split::Experiment.new(key) end end end
3
fix to pass spec
3
.rb
rb
mit
splitrb/split
10070789
<NME> configuration.rb <BEF> # frozen_string_literal: true module Split class Configuration attr_accessor :ignore_ip_addresses attr_accessor :ignore_filter attr_accessor :db_failover attr_accessor :db_failover_on_db_error attr_accessor :db_failover_allow_parameter_override attr_accessor :allow_multiple_experiments attr_accessor :enabled attr_accessor :persistence attr_accessor :persistence_cookie_length attr_accessor :persistence_cookie_domain attr_accessor :algorithm attr_accessor :store_override attr_accessor :start_manually attr_accessor :reset_manually attr_accessor :on_trial attr_accessor :on_trial_choose attr_accessor :on_trial_complete attr_accessor :on_experiment_reset attr_accessor :on_experiment_delete attr_accessor :on_before_experiment_reset attr_accessor :on_experiment_winner_choose attr_accessor :on_before_experiment_delete attr_accessor :include_rails_helper attr_accessor :beta_probability_simulations attr_accessor :winning_alternative_recalculation_interval attr_accessor :redis attr_accessor :dashboard_pagination_default_per_page attr_accessor :cache attr_reader :experiments attr_writer :bots attr_writer :robot_regex def bots @bots ||= { # Indexers "AdsBot-Google" => "Google Adwords", "Baidu" => "Chinese search engine", "Baiduspider" => "Chinese search engine", "bingbot" => "Microsoft bing bot", "Butterfly" => "Topsy Labs", "Gigabot" => "Gigabot spider", "Googlebot" => "Google spider", "MJ12bot" => "Majestic-12 spider", "msnbot" => "Microsoft bot", "rogerbot" => "SeoMoz spider", "PaperLiBot" => "PaperLi is another content curation service", "Slurp" => "Yahoo spider", "Sogou" => "Chinese search engine", "spider" => "generic web spider", "UnwindFetchor" => "Gnip crawler", "WordPress" => "WordPress spider", "YandexAccessibilityBot" => "Yandex accessibility spider", "YandexBot" => "Yandex spider", "YandexMobileBot" => "Yandex mobile spider", "ZIBB" => "ZIBB spider", # HTTP libraries "Apache-HttpClient" => "Java http library", "AppEngine-Google" => 
"Google App Engine", "curl" => "curl unix CLI http client", "ColdFusion" => "ColdFusion http library", "EventMachine HttpClient" => "Ruby http library", "Go http package" => "Go http library", "Go-http-client" => "Go http library", "Java" => "Generic Java http library", "libwww-perl" => "Perl client-server library loved by script kids", "lwp-trivial" => "Another Perl library loved by script kids", "Python-urllib" => "Python http library", "PycURL" => "Python http library", "Test Certificate Info" => "C http library?", "Typhoeus" => "Ruby http library", "Wget" => "wget unix CLI http client", # URL expanders / previewers "awe.sm" => "Awe.sm URL expander", "bitlybot" => "bit.ly bot", "[email protected]" => "Linkfluence bot", "facebookexternalhit" => "facebook bot", "Facebot" => "Facebook crawler", "Feedfetcher-Google" => "Google Feedfetcher", "https://developers.google.com/+/web/snippet" => "Google+ Snippet Fetcher", "LinkedInBot" => "LinkedIn bot", "LongURL" => "URL expander service", "NING" => "NING - Yet Another Twitter Swarmer", "Pinterestbot" => "Pinterest Bot", "redditbot" => "Reddit Bot", "ShortLinkTranslate" => "Link shortener", "Slackbot" => "Slackbot link expander", "TweetmemeBot" => "TweetMeMe Crawler", "Twitterbot" => "Twitter URL expander", "UnwindFetch" => "Gnip URL expander", "vkShare" => "VKontake Sharer", # Uptime monitoring "check_http" => "Nagios monitor", "GoogleStackdriverMonitoring" => "Google Cloud monitor", "NewRelicPinger" => "NewRelic monitor", "Panopta" => "Monitoring service", "Pingdom" => "Pingdom monitoring", "SiteUptime" => "Site monitoring services", "UptimeRobot" => "Monitoring service", # ??? "DigitalPersona Fingerprint Software" => "HP Fingerprint scanner", self.experiments.each do |key, value| metrics = value_for(value, :metric) rescue nil Array(metrics).each do |metric_name| if metric_name.to_sym @metrics[metric_name] ||= [] @metrics[metric_name] << Split::Experiment.new(key) end end end end def disabled? 
!enabled end def experiment_for(name) if normalized_experiments # TODO symbols normalized_experiments[name.to_sym] end end def metrics return @metrics if defined?(@metrics) @metrics = {} if self.experiments self.experiments.each do |key, value| metrics = value_for(value, :metric) rescue nil Array(metrics).each do |metric_name| if metric_name @metrics[metric_name.to_sym] ||= [] @metrics[metric_name.to_sym] << Split::Experiment.new(key) end end end end @metrics end def normalized_experiments return nil if @experiments.nil? experiment_config = {} @experiments.keys.each do |name| experiment_config[name.to_sym] = {} end @experiments.each do |experiment_name, settings| alternatives = if (alts = value_for(settings, :alternatives)) normalize_alternatives(alts) end experiment_data = { alternatives: alternatives, goals: value_for(settings, :goals), metadata: value_for(settings, :metadata), algorithm: value_for(settings, :algorithm), resettable: value_for(settings, :resettable) } experiment_data.each do |name, value| experiment_config[experiment_name.to_sym][name] = value if value != nil end end experiment_config end def normalize_alternatives(alternatives) given_probability, num_with_probability = alternatives.inject([0, 0]) do |a, v| p, n = a if percent = value_for(v, :percent) [p + percent, n + 1] else a end end num_without_probability = alternatives.length - num_with_probability unassigned_probability = ((100.0 - given_probability) / num_without_probability / 100.0) if num_with_probability.nonzero? 
alternatives = alternatives.map do |v| if (name = value_for(v, :name)) && (percent = value_for(v, :percent)) { name => percent / 100.0 } elsif name = value_for(v, :name) { name => unassigned_probability } else { v => unassigned_probability } end end [alternatives.shift, alternatives] else alternatives = alternatives.dup [alternatives.shift, alternatives] end end def robot_regex @robot_regex ||= /\b(?:#{escaped_bots.join('|')})\b|\A\W*\z/i end def initialize @ignore_ip_addresses = [] @ignore_filter = proc { |request| is_robot? || is_ignored_ip_address? } @db_failover = false @db_failover_on_db_error = proc { |error| } # e.g. use Rails logger here @on_experiment_reset = proc { |experiment| } @on_experiment_delete = proc { |experiment| } @on_before_experiment_reset = proc { |experiment| } @on_before_experiment_delete = proc { |experiment| } @on_experiment_winner_choose = proc { |experiment| } @db_failover_allow_parameter_override = false @allow_multiple_experiments = false @enabled = true @experiments = {} @persistence = Split::Persistence::SessionAdapter @persistence_cookie_length = 31536000 # One year from now @persistence_cookie_domain = nil @algorithm = Split::Algorithms::WeightedSample @include_rails_helper = true @beta_probability_simulations = 10000 @winning_alternative_recalculation_interval = 60 * 60 * 24 # 1 day @redis = ENV.fetch(ENV.fetch("REDIS_PROVIDER", "REDIS_URL"), "redis://localhost:6379") @dashboard_pagination_default_per_page = 10 end private def value_for(hash, key) if hash.kind_of?(Hash) hash.has_key?(key.to_s) ? 
hash[key.to_s] : hash[key.to_sym] end end def escaped_bots bots.map { |key, _| Regexp.escape(key) } end end end <MSG> fix to pass spec <DFF> @@ -111,9 +111,9 @@ module Split self.experiments.each do |key, value| metrics = value_for(value, :metric) rescue nil Array(metrics).each do |metric_name| - if metric_name.to_sym - @metrics[metric_name] ||= [] - @metrics[metric_name] << Split::Experiment.new(key) + if metric_name + @metrics[metric_name.to_sym] ||= [] + @metrics[metric_name.to_sym] << Split::Experiment.new(key) end end end
3
fix to pass spec
3
.rb
rb
mit
splitrb/split
10070790
<NME> helper.rb <BEF> # frozen_string_literal: true module Split module Helper OVERRIDE_PARAM_NAME = "ab_test" module_function def ab_test(metric_descriptor, control = nil, *alternatives) begin experiment = ExperimentCatalog.find_or_initialize(metric_descriptor, control, *alternatives) alternative = if Split.configuration.enabled && !exclude_visitor? experiment.save raise(Split::InvalidExperimentsFormatError) unless (Split.configuration.experiments || {}).fetch(experiment.name.to_sym, {})[:combined_experiments].nil? trial = Trial.new(user: ab_user, experiment: experiment, override: override_alternative(experiment.name), exclude: exclude_visitor?, disabled: split_generically_disabled?) alt = trial.choose!(self) alt ? alt.name : nil else control_variable(experiment.control) end rescue Errno::ECONNREFUSED, Redis::BaseError, SocketError => e raise(e) unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) if Split.configuration.db_failover_allow_parameter_override alternative = override_alternative(experiment.name) if override_present?(experiment.name) alternative = control_variable(experiment.control) if split_generically_disabled? end ensure alternative ||= control_variable(experiment.control) end if block_given? 
metadata = experiment.metadata[alternative] if experiment.metadata yield(alternative, metadata || {}) else alternative end end def reset!(experiment) ab_user.delete(experiment.key) end def override(experiment_name, alternatives) return params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name]) end def begin_experiment(experiment, alternative_name) true else alternative_name = ab_user[experiment.key] trial = Trial.new( user: ab_user, experiment: experiment, alternative: alternative_name, goals: options[:goals], ) trial.complete!(self) if should_reset reset!(experiment) else ab_user[experiment.finished_key] = true end end end def ab_finished(metric_descriptor, options = { reset: true }) return if exclude_visitor? || Split.configuration.disabled? metric_descriptor, goals = normalize_metric(metric_descriptor) experiments = Metric.possible_experiments(metric_descriptor) if experiments.any? experiments.each do |experiment| next if override_present?(experiment.key) finish_experiment(experiment, options.merge(goals: goals)) end end rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def ab_record_extra_info(metric_descriptor, key, value = 1) return if exclude_visitor? || Split.configuration.disabled? metric_descriptor, _ = normalize_metric(metric_descriptor) experiments = Metric.possible_experiments(metric_descriptor) if experiments.any? 
experiments.each do |experiment| alternative_name = ab_user[experiment.key] if alternative_name alternative = experiment.alternatives.find { |alt| alt.name == alternative_name } alternative.record_extra_info(key, value) if alternative end end end rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def ab_active_experiments ab_user.active_experiments rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def override_present?(experiment_name) override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name) end def override_alternative(experiment_name) override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name) end def override_alternative_by_params(experiment_name) defined?(params) && params[OVERRIDE_PARAM_NAME] && params[OVERRIDE_PARAM_NAME][experiment_name] end def override_alternative_by_cookies(experiment_name) return unless defined?(request) if request.cookies && request.cookies.key?("split_override") experiments = JSON.parse(request.cookies["split_override"]) rescue {} experiments[experiment_name] end end def split_generically_disabled? defined?(params) && params["SPLIT_DISABLE"] end def ab_user @ab_user ||= User.new(self) end def exclude_visitor? defined?(request) && (instance_exec(request, &Split.configuration.ignore_filter) || is_ignored_ip_address? || is_robot? || is_preview?) end def is_robot? defined?(request) && request.user_agent =~ Split.configuration.robot_regex end def is_preview? defined?(request) && defined?(request.headers) && request.headers["x-purpose"] == "preview" end def is_ignored_ip_address? return false if Split.configuration.ignore_ip_addresses.empty? 
Split.configuration.ignore_ip_addresses.each do |ip| return true if defined?(request) && (request.ip == ip || (ip.class == Regexp && request.ip =~ ip)) end false end def active_experiments ab_user.active_experiments end def normalize_metric(metric_descriptor) if Hash === metric_descriptor experiment_name = metric_descriptor.keys.first goals = Array(metric_descriptor.values.first) else experiment_name = metric_descriptor goals = [] end return experiment_name, goals end def control_variable(control) Hash === control ? control.keys.first.to_s : control.to_s end end end <MSG> minor code cleanup <DFF> @@ -45,7 +45,7 @@ module Split end def override(experiment_name, alternatives) - return params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name]) + params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name]) end def begin_experiment(experiment, alternative_name)
1
minor code cleanup
1
.rb
rb
mit
splitrb/split
10070791
<NME> helper.rb <BEF> # frozen_string_literal: true module Split module Helper OVERRIDE_PARAM_NAME = "ab_test" module_function def ab_test(metric_descriptor, control = nil, *alternatives) begin experiment = ExperimentCatalog.find_or_initialize(metric_descriptor, control, *alternatives) alternative = if Split.configuration.enabled && !exclude_visitor? experiment.save raise(Split::InvalidExperimentsFormatError) unless (Split.configuration.experiments || {}).fetch(experiment.name.to_sym, {})[:combined_experiments].nil? trial = Trial.new(user: ab_user, experiment: experiment, override: override_alternative(experiment.name), exclude: exclude_visitor?, disabled: split_generically_disabled?) alt = trial.choose!(self) alt ? alt.name : nil else control_variable(experiment.control) end rescue Errno::ECONNREFUSED, Redis::BaseError, SocketError => e raise(e) unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) if Split.configuration.db_failover_allow_parameter_override alternative = override_alternative(experiment.name) if override_present?(experiment.name) alternative = control_variable(experiment.control) if split_generically_disabled? end ensure alternative ||= control_variable(experiment.control) end if block_given? 
metadata = experiment.metadata[alternative] if experiment.metadata yield(alternative, metadata || {}) else alternative end end def reset!(experiment) ab_user.delete(experiment.key) end def override(experiment_name, alternatives) return params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name]) end def begin_experiment(experiment, alternative_name) true else alternative_name = ab_user[experiment.key] trial = Trial.new( user: ab_user, experiment: experiment, alternative: alternative_name, goals: options[:goals], ) trial.complete!(self) if should_reset reset!(experiment) else ab_user[experiment.finished_key] = true end end end def ab_finished(metric_descriptor, options = { reset: true }) return if exclude_visitor? || Split.configuration.disabled? metric_descriptor, goals = normalize_metric(metric_descriptor) experiments = Metric.possible_experiments(metric_descriptor) if experiments.any? experiments.each do |experiment| next if override_present?(experiment.key) finish_experiment(experiment, options.merge(goals: goals)) end end rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def ab_record_extra_info(metric_descriptor, key, value = 1) return if exclude_visitor? || Split.configuration.disabled? metric_descriptor, _ = normalize_metric(metric_descriptor) experiments = Metric.possible_experiments(metric_descriptor) if experiments.any? 
experiments.each do |experiment| alternative_name = ab_user[experiment.key] if alternative_name alternative = experiment.alternatives.find { |alt| alt.name == alternative_name } alternative.record_extra_info(key, value) if alternative end end end rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def ab_active_experiments ab_user.active_experiments rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def override_present?(experiment_name) override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name) end def override_alternative(experiment_name) override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name) end def override_alternative_by_params(experiment_name) defined?(params) && params[OVERRIDE_PARAM_NAME] && params[OVERRIDE_PARAM_NAME][experiment_name] end def override_alternative_by_cookies(experiment_name) return unless defined?(request) if request.cookies && request.cookies.key?("split_override") experiments = JSON.parse(request.cookies["split_override"]) rescue {} experiments[experiment_name] end end def split_generically_disabled? defined?(params) && params["SPLIT_DISABLE"] end def ab_user @ab_user ||= User.new(self) end def exclude_visitor? defined?(request) && (instance_exec(request, &Split.configuration.ignore_filter) || is_ignored_ip_address? || is_robot? || is_preview?) end def is_robot? defined?(request) && request.user_agent =~ Split.configuration.robot_regex end def is_preview? defined?(request) && defined?(request.headers) && request.headers["x-purpose"] == "preview" end def is_ignored_ip_address? return false if Split.configuration.ignore_ip_addresses.empty? 
Split.configuration.ignore_ip_addresses.each do |ip| return true if defined?(request) && (request.ip == ip || (ip.class == Regexp && request.ip =~ ip)) end false end def active_experiments ab_user.active_experiments end def normalize_metric(metric_descriptor) if Hash === metric_descriptor experiment_name = metric_descriptor.keys.first goals = Array(metric_descriptor.values.first) else experiment_name = metric_descriptor goals = [] end return experiment_name, goals end def control_variable(control) Hash === control ? control.keys.first.to_s : control.to_s end end end <MSG> minor code cleanup <DFF> @@ -45,7 +45,7 @@ module Split end def override(experiment_name, alternatives) - return params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name]) + params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name]) end def begin_experiment(experiment, alternative_name)
1
minor code cleanup
1
.rb
rb
mit
splitrb/split
10070792
<NME> helper.rb <BEF> # frozen_string_literal: true module Split module Helper OVERRIDE_PARAM_NAME = "ab_test" module_function def ab_test(metric_descriptor, control = nil, *alternatives) begin experiment = ExperimentCatalog.find_or_initialize(metric_descriptor, control, *alternatives) alternative = if Split.configuration.enabled && !exclude_visitor? experiment.save raise(Split::InvalidExperimentsFormatError) unless (Split.configuration.experiments || {}).fetch(experiment.name.to_sym, {})[:combined_experiments].nil? trial = Trial.new(user: ab_user, experiment: experiment, override: override_alternative(experiment.name), exclude: exclude_visitor?, disabled: split_generically_disabled?) alt = trial.choose!(self) alt ? alt.name : nil else control_variable(experiment.control) end rescue Errno::ECONNREFUSED, Redis::BaseError, SocketError => e raise(e) unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) if Split.configuration.db_failover_allow_parameter_override alternative = override_alternative(experiment.name) if override_present?(experiment.name) alternative = control_variable(experiment.control) if split_generically_disabled? end ensure alternative ||= control_variable(experiment.control) end if block_given? 
metadata = experiment.metadata[alternative] if experiment.metadata yield(alternative, metadata || {}) else alternative end end def reset!(experiment) ab_user.delete(experiment.key) end def override(experiment_name, alternatives) return params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name]) end def begin_experiment(experiment, alternative_name) true else alternative_name = ab_user[experiment.key] trial = Trial.new( user: ab_user, experiment: experiment, alternative: alternative_name, goals: options[:goals], ) trial.complete!(self) if should_reset reset!(experiment) else ab_user[experiment.finished_key] = true end end end def ab_finished(metric_descriptor, options = { reset: true }) return if exclude_visitor? || Split.configuration.disabled? metric_descriptor, goals = normalize_metric(metric_descriptor) experiments = Metric.possible_experiments(metric_descriptor) if experiments.any? experiments.each do |experiment| next if override_present?(experiment.key) finish_experiment(experiment, options.merge(goals: goals)) end end rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def ab_record_extra_info(metric_descriptor, key, value = 1) return if exclude_visitor? || Split.configuration.disabled? metric_descriptor, _ = normalize_metric(metric_descriptor) experiments = Metric.possible_experiments(metric_descriptor) if experiments.any? 
experiments.each do |experiment| alternative_name = ab_user[experiment.key] if alternative_name alternative = experiment.alternatives.find { |alt| alt.name == alternative_name } alternative.record_extra_info(key, value) if alternative end end end rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def ab_active_experiments ab_user.active_experiments rescue => e raise unless Split.configuration.db_failover Split.configuration.db_failover_on_db_error.call(e) end def override_present?(experiment_name) override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name) end def override_alternative(experiment_name) override_alternative_by_params(experiment_name) || override_alternative_by_cookies(experiment_name) end def override_alternative_by_params(experiment_name) defined?(params) && params[OVERRIDE_PARAM_NAME] && params[OVERRIDE_PARAM_NAME][experiment_name] end def override_alternative_by_cookies(experiment_name) return unless defined?(request) if request.cookies && request.cookies.key?("split_override") experiments = JSON.parse(request.cookies["split_override"]) rescue {} experiments[experiment_name] end end def split_generically_disabled? defined?(params) && params["SPLIT_DISABLE"] end def ab_user @ab_user ||= User.new(self) end def exclude_visitor? defined?(request) && (instance_exec(request, &Split.configuration.ignore_filter) || is_ignored_ip_address? || is_robot? || is_preview?) end def is_robot? defined?(request) && request.user_agent =~ Split.configuration.robot_regex end def is_preview? defined?(request) && defined?(request.headers) && request.headers["x-purpose"] == "preview" end def is_ignored_ip_address? return false if Split.configuration.ignore_ip_addresses.empty? 
Split.configuration.ignore_ip_addresses.each do |ip| return true if defined?(request) && (request.ip == ip || (ip.class == Regexp && request.ip =~ ip)) end false end def active_experiments ab_user.active_experiments end def normalize_metric(metric_descriptor) if Hash === metric_descriptor experiment_name = metric_descriptor.keys.first goals = Array(metric_descriptor.values.first) else experiment_name = metric_descriptor goals = [] end return experiment_name, goals end def control_variable(control) Hash === control ? control.keys.first.to_s : control.to_s end end end <MSG> minor code cleanup <DFF> @@ -45,7 +45,7 @@ module Split end def override(experiment_name, alternatives) - return params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name]) + params[experiment_name] if defined?(params) && alternatives.include?(params[experiment_name]) end def begin_experiment(experiment, alternative_name)
1
minor code cleanup
1
.rb
rb
mit
splitrb/split
10070793
<NME> experiment_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "time" describe Split::Experiment do def new_experiment(goals = []) Split::Experiment.new("link_color", alternatives: ["blue", "red", "green"], goals: goals) end def alternative(color) Split::Alternative.new(color, "link_color") end let(:experiment) { new_experiment } let(:blue) { alternative("blue") } let(:green) { alternative("green") } context "with an experiment" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"]) } it "should have a name" do expect(experiment.name).to eq("basket_text") end it "should have alternatives" do expect(experiment.alternatives.length).to be 2 end it "should have alternatives with correct names" do expect(experiment.alternatives.collect { |a| a.name }).to eq(["Basket", "Cart"]) end it "should be resettable by default" do expect(experiment.resettable).to be_truthy end it "should save to redis" do experiment.save expect(Split.redis.exists?("basket_text")).to be true end it "should save the start time to redis" do experiment_start_time = Time.at(1372167761) expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should not save the start time to redis when start_manually is enabled" do expect(Split.configuration).to receive(:start_manually).and_return(true) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should save the selected algorithm to redis" do experiment_algorithm = Split::Algorithms::Whiplash experiment.algorithm = experiment_algorithm experiment.save expect(Split::ExperimentCatalog.find("basket_text").algorithm).to eq(experiment_algorithm) end it "should handle having a start time stored as a string" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to 
receive(:now).twice.and_return(experiment_start_time) experiment.save Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s) expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should handle not having a start time" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save Split.redis.hdel(:experiment_start_times, experiment.name) expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should not create duplicates when saving multiple times" do experiment.save experiment.save expect(Split.redis.exists?("basket_text")).to be true expect(Split.redis.lrange("basket_text", 0, -1)).to eq(['{"Basket":1}', '{"Cart":1}']) end describe "new record?" do it "should know if it hasn't been saved yet" do expect(experiment.new_record?).to be_truthy end it "should know if it has been saved yet" do experiment.save expect(experiment.new_record?).to be_falsey end end describe "control" do it "should be the first alternative" do experiment.save expect(experiment.control.name).to eq("Basket") end end end describe "initialization" do it "should set the algorithm when passed as an option to the initializer" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end it "should be possible to make an experiment not resettable" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) expect(experiment.resettable).to be_falsey end context "from configuration" do let(:experiment_name) { :my_experiment } let(:experiments) do { experiment_name => { alternatives: ["Control Opt", "Alt one"] } } end before { Split.configuration.experiments = experiments } it "assigns default values to the experiment" do 
expect(Split::Experiment.new(experiment_name).resettable).to eq(true) end end end describe "persistent configuration" do it "should persist resettable in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.resettable).to be_falsey end describe "#metadata" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash, metadata: meta) } let(:meta) { { a: "b" } } before do experiment.save end it "should delete the key when metadata is removed" do experiment.metadata = nil experiment.save expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey end context "simple hash" do let(:meta) { { "basket" => "a", "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end context "nested hash" do let(:meta) { { "basket" => { "one" => "two" }, "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end end it "should persist algorithm in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.algorithm).to eq(Split::Algorithms::Whiplash) end it "should persist a new experiment in redis, that does not exist in the configuration file" do experiment = Split::Experiment.new("foobar", alternatives: ["tra", "la"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("foobar") expect(e).to eq(experiment) expect(e.alternatives.collect { |a| a.name }).to eq(["tra", "la"]) end end describe "deleting" do it 
"should delete itself" do experiment = Split::Experiment.new("basket_text", alternatives: [ "Basket", "Cart"]) experiment.save experiment.delete expect(Split.redis.exists?("link_color")).to be false expect(Split::ExperimentCatalog.find("link_color")).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.delete expect(experiment.version).to eq(1) end it "should call the on_experiment_delete hook" do expect(Split.configuration.on_experiment_delete).to receive(:call) experiment.delete end it "should call the on_before_experiment_delete hook" do expect(Split.configuration.on_before_experiment_delete).to receive(:call) experiment.delete end it "should reset the start time if the experiment should be manually started" do Split.configuration.start_manually = true experiment.start experiment.delete expect(experiment.start_time).to be_nil end it "should default cohorting back to false" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq(true) experiment.delete expect(experiment.cohorting_disabled?).to eq(false) end end describe "winner" do it "should have no winner initially" do expect(experiment.winner).to be_nil end end describe "winner=" do it "should allow you to specify a winner" do experiment.save experiment.winner = "red" expect(experiment.winner.name).to eq("red") end it "should call the on_experiment_winner_choose hook" do expect(Split.configuration.on_experiment_winner_choose).to receive(:call) experiment.winner = "green" end context "when has_winner state is memoized" do before { expect(experiment).to_not have_winner } it "should keep has_winner state consistent" do experiment.winner = "red" expect(experiment).to have_winner end end end describe "reset_winner" do before { experiment.winner = "green" } it "should reset the winner" do experiment.reset_winner expect(experiment.winner).to be_nil end context "when has_winner state is memoized" do before { expect(experiment).to have_winner } it 
"should keep has_winner state consistent" do experiment.reset_winner expect(experiment).to_not have_winner end end end describe "has_winner?" do context "with winner" do before { experiment.winner = "red" } it "returns true" do expect(experiment).to have_winner end end context "without winner" do it "returns false" do expect(experiment).to_not have_winner end end it "memoizes has_winner state" do expect(experiment).to receive(:winner).once expect(experiment).to_not have_winner expect(experiment).to_not have_winner end end describe "reset" do let(:reset_manually) { false } before do allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) experiment.save green.increment_participation green.increment_participation end it "should reset all alternatives" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end it "should reset the winner" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(experiment.winner).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.reset expect(experiment.version).to eq(1) end it "should call the on_experiment_reset hook" do expect(Split.configuration.on_experiment_reset).to receive(:call) experiment.reset end it "should call the on_before_experiment_reset hook" do expect(Split.configuration.on_before_experiment_reset).to receive(:call) experiment.reset end end describe "algorithm" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } it "should use the default algorithm if none is specified" do expect(experiment.algorithm).to eq(Split.configuration.algorithm) end it "should use the user specified algorithm for this experiment if specified" do experiment.algorithm = 
Split::Algorithms::Whiplash expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end end describe "#next_alternative" do context "with multiple alternatives" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } context "with winner" do it "should always return the winner" do green = Split::Alternative.new("green", "link_color") experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation expect(experiment.next_alternative.name).to eq("green") end end context "without winner" do it "should use the specified algorithm" do experiment.algorithm = Split::Algorithms::Whiplash expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new("green", "link_color")) expect(experiment.next_alternative.name).to eq("green") end end end context "with single alternative" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue") } it "should always return the only alternative" do expect(experiment.next_alternative.name).to eq("blue") expect(experiment.next_alternative.name).to eq("blue") end end end describe "#cohorting_disabled?" 
do it "returns false when nothing has been configured" do expect(experiment.cohorting_disabled?).to eq false end it "returns true when enable_cohorting is performed" do experiment.enable_cohorting expect(experiment.cohorting_disabled?).to eq false end it "returns false when nothing has been configured" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq true end end p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end end end it "should only reset once" do experiment.save expect(experiment.version).to eq(0) same_experiment = same_but_different_alternative expect(same_experiment.version).to eq(1) same_experiment_again = same_but_different_alternative expect(same_experiment_again.version).to eq(1) end context "when metadata is changed" do it "should increase version" do experiment.save experiment.metadata = { "foo" => "bar" } expect { experiment.save }.to change { experiment.version }.by(1) end it "does not increase version" do experiment.metadata = nil experiment.save expect { experiment.save }.to change { experiment.version }.by(0) end end context "when experiment configuration is changed" do let(:reset_manually) { false } before do experiment.save allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) green.increment_participation green.increment_participation experiment.set_alternatives_and_options(alternatives: %w(blue red green zip), goals: %w(purchase)) experiment.save end it "resets all alternatives" do expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end context "when reset_manually is set" do let(:reset_manually) { true } it "does not reset alternatives" do expect(green.participant_count).to eq(2) expect(green.completed_count).to eq(0) end end end end describe "alternatives passed as non-strings" do it "should throw an exception if an alternative is passed that is not a string" do expect { Split::ExperimentCatalog.find_or_create("link_color", 
:blue, :red) }.to raise_error(ArgumentError) expect { Split::ExperimentCatalog.find_or_create("link_enabled", true, false) }.to raise_error(ArgumentError) end end describe "specifying weights" do let(:experiment_with_weight) { Split::ExperimentCatalog.find_or_create("link_color", { "blue" => 1 }, { "red" => 2 }) } it "should work for a new experiment" do expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end it "should work for an existing experiment" do experiment.save expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end end describe "specifying goals" do let(:experiment) { new_experiment(["purchase"]) } context "saving experiment" do let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ "link_color" => ["purchase", "refund"] }, "blue", "red", "green") } before { experiment.save } it "can find existing experiment" do expect(Split::ExperimentCatalog.find("link_color").name).to eq("link_color") end it "should reset an experiment if it is loaded with different goals" do same_but_different_goals expect(Split::ExperimentCatalog.find("link_color").goals).to eq(["purchase", "refund"]) end end it "should have goals" do expect(experiment.goals).to eq(["purchase"]) end context "find or create experiment" do it "should have correct goals" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.goals).to eq(["purchase", "refund"]) experiment = Split::ExperimentCatalog.find_or_create("link_color3", "blue", "red", "green") expect(experiment.goals).to eq([]) end end end describe "beta probability calculation" do it "should return a hash with the probability of each alternative being the best" do experiment = Split::ExperimentCatalog.find_or_create("mathematicians", "bernoulli", "poisson", "lagrange") experiment.calc_winning_alternatives expect(experiment.alternative_probabilities).not_to be_nil end it "should return between 46% 
and 54% probability for an experiment with 2 alternatives and no data" do experiment = Split::ExperimentCatalog.find_or_create("scientists", "einstein", "bohr") experiment.calc_winning_alternatives expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50) end it "should calculate the probability of being the winning alternative separately for each goal", skip: true do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") goal1 = experiment.goals[0] goal2 = experiment.goals[1] experiment.alternatives.each do |alternative| alternative.participant_count = 50 alternative.set_completed_count(10, goal1) alternative.set_completed_count(15+rand(30), goal2) end experiment.calc_winning_alternatives alt = experiment.alternatives[0] p_goal1 = alt.p_winner(goal1) p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end it "should return nil and not re-calculate probabilities if they have already been calculated today" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.calc_winning_alternatives).not_to be nil expect(experiment.calc_winning_alternatives).to be nil end end end <MSG> Fix caching of winning alternative to prevent recalculation each time the dashboard is loaded It looks like when this feature was added, the calculation of winning alternatives was meant to take place only once per day. The #calc_winning_alternatives method was never called, which was meant to be saving the experiment's last calc_time. Update the experiment view to call this method instead of the #estimate_winning_alternative method directly. Fix caching so that the #calc_time= method is called, rather than assigning to a local variable. Update calc_time so that number of days since epoch is stored, rather than the day of month (1-31). 
Ensure we're comparing integer values, rather than the string value Redis returns from #hget. <DFF> @@ -428,6 +428,13 @@ describe Split::Experiment do p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end + + it "should return nil and not re-calculate probabilities if they have already been calculated today" do + experiment = Split::ExperimentCatalog.find_or_create({'link_color3' => ["purchase", "refund"]}, 'blue', 'red', 'green') + experiment_calc_time = Time.now.utc.to_i / 86400 + experiment.calc_time = experiment_calc_time + expect(experiment.calc_winning_alternatives).to be nil + end end end
7
Fix caching of winning alternative to prevent recalculation each time the dashboard is loaded
0
.rb
rb
mit
splitrb/split
10070794
<NME> experiment_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "time" describe Split::Experiment do def new_experiment(goals = []) Split::Experiment.new("link_color", alternatives: ["blue", "red", "green"], goals: goals) end def alternative(color) Split::Alternative.new(color, "link_color") end let(:experiment) { new_experiment } let(:blue) { alternative("blue") } let(:green) { alternative("green") } context "with an experiment" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"]) } it "should have a name" do expect(experiment.name).to eq("basket_text") end it "should have alternatives" do expect(experiment.alternatives.length).to be 2 end it "should have alternatives with correct names" do expect(experiment.alternatives.collect { |a| a.name }).to eq(["Basket", "Cart"]) end it "should be resettable by default" do expect(experiment.resettable).to be_truthy end it "should save to redis" do experiment.save expect(Split.redis.exists?("basket_text")).to be true end it "should save the start time to redis" do experiment_start_time = Time.at(1372167761) expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should not save the start time to redis when start_manually is enabled" do expect(Split.configuration).to receive(:start_manually).and_return(true) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should save the selected algorithm to redis" do experiment_algorithm = Split::Algorithms::Whiplash experiment.algorithm = experiment_algorithm experiment.save expect(Split::ExperimentCatalog.find("basket_text").algorithm).to eq(experiment_algorithm) end it "should handle having a start time stored as a string" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to 
receive(:now).twice.and_return(experiment_start_time) experiment.save Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s) expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should handle not having a start time" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save Split.redis.hdel(:experiment_start_times, experiment.name) expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should not create duplicates when saving multiple times" do experiment.save experiment.save expect(Split.redis.exists?("basket_text")).to be true expect(Split.redis.lrange("basket_text", 0, -1)).to eq(['{"Basket":1}', '{"Cart":1}']) end describe "new record?" do it "should know if it hasn't been saved yet" do expect(experiment.new_record?).to be_truthy end it "should know if it has been saved yet" do experiment.save expect(experiment.new_record?).to be_falsey end end describe "control" do it "should be the first alternative" do experiment.save expect(experiment.control.name).to eq("Basket") end end end describe "initialization" do it "should set the algorithm when passed as an option to the initializer" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end it "should be possible to make an experiment not resettable" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) expect(experiment.resettable).to be_falsey end context "from configuration" do let(:experiment_name) { :my_experiment } let(:experiments) do { experiment_name => { alternatives: ["Control Opt", "Alt one"] } } end before { Split.configuration.experiments = experiments } it "assigns default values to the experiment" do 
expect(Split::Experiment.new(experiment_name).resettable).to eq(true) end end end describe "persistent configuration" do it "should persist resettable in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.resettable).to be_falsey end describe "#metadata" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash, metadata: meta) } let(:meta) { { a: "b" } } before do experiment.save end it "should delete the key when metadata is removed" do experiment.metadata = nil experiment.save expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey end context "simple hash" do let(:meta) { { "basket" => "a", "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end context "nested hash" do let(:meta) { { "basket" => { "one" => "two" }, "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end end it "should persist algorithm in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.algorithm).to eq(Split::Algorithms::Whiplash) end it "should persist a new experiment in redis, that does not exist in the configuration file" do experiment = Split::Experiment.new("foobar", alternatives: ["tra", "la"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("foobar") expect(e).to eq(experiment) expect(e.alternatives.collect { |a| a.name }).to eq(["tra", "la"]) end end describe "deleting" do it 
"should delete itself" do experiment = Split::Experiment.new("basket_text", alternatives: [ "Basket", "Cart"]) experiment.save experiment.delete expect(Split.redis.exists?("link_color")).to be false expect(Split::ExperimentCatalog.find("link_color")).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.delete expect(experiment.version).to eq(1) end it "should call the on_experiment_delete hook" do expect(Split.configuration.on_experiment_delete).to receive(:call) experiment.delete end it "should call the on_before_experiment_delete hook" do expect(Split.configuration.on_before_experiment_delete).to receive(:call) experiment.delete end it "should reset the start time if the experiment should be manually started" do Split.configuration.start_manually = true experiment.start experiment.delete expect(experiment.start_time).to be_nil end it "should default cohorting back to false" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq(true) experiment.delete expect(experiment.cohorting_disabled?).to eq(false) end end describe "winner" do it "should have no winner initially" do expect(experiment.winner).to be_nil end end describe "winner=" do it "should allow you to specify a winner" do experiment.save experiment.winner = "red" expect(experiment.winner.name).to eq("red") end it "should call the on_experiment_winner_choose hook" do expect(Split.configuration.on_experiment_winner_choose).to receive(:call) experiment.winner = "green" end context "when has_winner state is memoized" do before { expect(experiment).to_not have_winner } it "should keep has_winner state consistent" do experiment.winner = "red" expect(experiment).to have_winner end end end describe "reset_winner" do before { experiment.winner = "green" } it "should reset the winner" do experiment.reset_winner expect(experiment.winner).to be_nil end context "when has_winner state is memoized" do before { expect(experiment).to have_winner } it 
"should keep has_winner state consistent" do experiment.reset_winner expect(experiment).to_not have_winner end end end describe "has_winner?" do context "with winner" do before { experiment.winner = "red" } it "returns true" do expect(experiment).to have_winner end end context "without winner" do it "returns false" do expect(experiment).to_not have_winner end end it "memoizes has_winner state" do expect(experiment).to receive(:winner).once expect(experiment).to_not have_winner expect(experiment).to_not have_winner end end describe "reset" do let(:reset_manually) { false } before do allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) experiment.save green.increment_participation green.increment_participation end it "should reset all alternatives" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end it "should reset the winner" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(experiment.winner).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.reset expect(experiment.version).to eq(1) end it "should call the on_experiment_reset hook" do expect(Split.configuration.on_experiment_reset).to receive(:call) experiment.reset end it "should call the on_before_experiment_reset hook" do expect(Split.configuration.on_before_experiment_reset).to receive(:call) experiment.reset end end describe "algorithm" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } it "should use the default algorithm if none is specified" do expect(experiment.algorithm).to eq(Split.configuration.algorithm) end it "should use the user specified algorithm for this experiment if specified" do experiment.algorithm = 
Split::Algorithms::Whiplash expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end end describe "#next_alternative" do context "with multiple alternatives" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } context "with winner" do it "should always return the winner" do green = Split::Alternative.new("green", "link_color") experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation expect(experiment.next_alternative.name).to eq("green") end end context "without winner" do it "should use the specified algorithm" do experiment.algorithm = Split::Algorithms::Whiplash expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new("green", "link_color")) expect(experiment.next_alternative.name).to eq("green") end end end context "with single alternative" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue") } it "should always return the only alternative" do expect(experiment.next_alternative.name).to eq("blue") expect(experiment.next_alternative.name).to eq("blue") end end end describe "#cohorting_disabled?" 
do it "returns false when nothing has been configured" do expect(experiment.cohorting_disabled?).to eq false end it "returns true when enable_cohorting is performed" do experiment.enable_cohorting expect(experiment.cohorting_disabled?).to eq false end it "returns false when nothing has been configured" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq true end end p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end end end it "should only reset once" do experiment.save expect(experiment.version).to eq(0) same_experiment = same_but_different_alternative expect(same_experiment.version).to eq(1) same_experiment_again = same_but_different_alternative expect(same_experiment_again.version).to eq(1) end context "when metadata is changed" do it "should increase version" do experiment.save experiment.metadata = { "foo" => "bar" } expect { experiment.save }.to change { experiment.version }.by(1) end it "does not increase version" do experiment.metadata = nil experiment.save expect { experiment.save }.to change { experiment.version }.by(0) end end context "when experiment configuration is changed" do let(:reset_manually) { false } before do experiment.save allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) green.increment_participation green.increment_participation experiment.set_alternatives_and_options(alternatives: %w(blue red green zip), goals: %w(purchase)) experiment.save end it "resets all alternatives" do expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end context "when reset_manually is set" do let(:reset_manually) { true } it "does not reset alternatives" do expect(green.participant_count).to eq(2) expect(green.completed_count).to eq(0) end end end end describe "alternatives passed as non-strings" do it "should throw an exception if an alternative is passed that is not a string" do expect { Split::ExperimentCatalog.find_or_create("link_color", 
:blue, :red) }.to raise_error(ArgumentError) expect { Split::ExperimentCatalog.find_or_create("link_enabled", true, false) }.to raise_error(ArgumentError) end end describe "specifying weights" do let(:experiment_with_weight) { Split::ExperimentCatalog.find_or_create("link_color", { "blue" => 1 }, { "red" => 2 }) } it "should work for a new experiment" do expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end it "should work for an existing experiment" do experiment.save expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end end describe "specifying goals" do let(:experiment) { new_experiment(["purchase"]) } context "saving experiment" do let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ "link_color" => ["purchase", "refund"] }, "blue", "red", "green") } before { experiment.save } it "can find existing experiment" do expect(Split::ExperimentCatalog.find("link_color").name).to eq("link_color") end it "should reset an experiment if it is loaded with different goals" do same_but_different_goals expect(Split::ExperimentCatalog.find("link_color").goals).to eq(["purchase", "refund"]) end end it "should have goals" do expect(experiment.goals).to eq(["purchase"]) end context "find or create experiment" do it "should have correct goals" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.goals).to eq(["purchase", "refund"]) experiment = Split::ExperimentCatalog.find_or_create("link_color3", "blue", "red", "green") expect(experiment.goals).to eq([]) end end end describe "beta probability calculation" do it "should return a hash with the probability of each alternative being the best" do experiment = Split::ExperimentCatalog.find_or_create("mathematicians", "bernoulli", "poisson", "lagrange") experiment.calc_winning_alternatives expect(experiment.alternative_probabilities).not_to be_nil end it "should return between 46% 
and 54% probability for an experiment with 2 alternatives and no data" do experiment = Split::ExperimentCatalog.find_or_create("scientists", "einstein", "bohr") experiment.calc_winning_alternatives expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50) end it "should calculate the probability of being the winning alternative separately for each goal", skip: true do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") goal1 = experiment.goals[0] goal2 = experiment.goals[1] experiment.alternatives.each do |alternative| alternative.participant_count = 50 alternative.set_completed_count(10, goal1) alternative.set_completed_count(15+rand(30), goal2) end experiment.calc_winning_alternatives alt = experiment.alternatives[0] p_goal1 = alt.p_winner(goal1) p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end it "should return nil and not re-calculate probabilities if they have already been calculated today" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.calc_winning_alternatives).not_to be nil expect(experiment.calc_winning_alternatives).to be nil end end end <MSG> Fix caching of winning alternative to prevent recalculation each time the dashboard is loaded It looks like when this feature was added, the calculation of winning alternatives was meant to take place only once per day. The #calc_winning_alternatives method was never called, which was meant to be saving the experiment's last calc_time. Update the experiment view to call this method instead of the #estimate_winning_alternative method directly. Fix caching so that the #calc_time= method is called, rather than assigning to a local variable. Update calc_time so that number of days since epoch is stored, rather than the day of month (1-31). 
Ensure we're comparing integer values, rather than the string value Redis returns from #hget. <DFF> @@ -428,6 +428,13 @@ describe Split::Experiment do p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end + + it "should return nil and not re-calculate probabilities if they have already been calculated today" do + experiment = Split::ExperimentCatalog.find_or_create({'link_color3' => ["purchase", "refund"]}, 'blue', 'red', 'green') + experiment_calc_time = Time.now.utc.to_i / 86400 + experiment.calc_time = experiment_calc_time + expect(experiment.calc_winning_alternatives).to be nil + end end end
7
Fix caching of winning alternative to prevent recalculation each time the dashboard is loaded
0
.rb
rb
mit
splitrb/split
10070795
<NME> experiment_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "time" describe Split::Experiment do def new_experiment(goals = []) Split::Experiment.new("link_color", alternatives: ["blue", "red", "green"], goals: goals) end def alternative(color) Split::Alternative.new(color, "link_color") end let(:experiment) { new_experiment } let(:blue) { alternative("blue") } let(:green) { alternative("green") } context "with an experiment" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"]) } it "should have a name" do expect(experiment.name).to eq("basket_text") end it "should have alternatives" do expect(experiment.alternatives.length).to be 2 end it "should have alternatives with correct names" do expect(experiment.alternatives.collect { |a| a.name }).to eq(["Basket", "Cart"]) end it "should be resettable by default" do expect(experiment.resettable).to be_truthy end it "should save to redis" do experiment.save expect(Split.redis.exists?("basket_text")).to be true end it "should save the start time to redis" do experiment_start_time = Time.at(1372167761) expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should not save the start time to redis when start_manually is enabled" do expect(Split.configuration).to receive(:start_manually).and_return(true) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should save the selected algorithm to redis" do experiment_algorithm = Split::Algorithms::Whiplash experiment.algorithm = experiment_algorithm experiment.save expect(Split::ExperimentCatalog.find("basket_text").algorithm).to eq(experiment_algorithm) end it "should handle having a start time stored as a string" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to 
receive(:now).twice.and_return(experiment_start_time) experiment.save Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s) expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should handle not having a start time" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save Split.redis.hdel(:experiment_start_times, experiment.name) expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should not create duplicates when saving multiple times" do experiment.save experiment.save expect(Split.redis.exists?("basket_text")).to be true expect(Split.redis.lrange("basket_text", 0, -1)).to eq(['{"Basket":1}', '{"Cart":1}']) end describe "new record?" do it "should know if it hasn't been saved yet" do expect(experiment.new_record?).to be_truthy end it "should know if it has been saved yet" do experiment.save expect(experiment.new_record?).to be_falsey end end describe "control" do it "should be the first alternative" do experiment.save expect(experiment.control.name).to eq("Basket") end end end describe "initialization" do it "should set the algorithm when passed as an option to the initializer" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end it "should be possible to make an experiment not resettable" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) expect(experiment.resettable).to be_falsey end context "from configuration" do let(:experiment_name) { :my_experiment } let(:experiments) do { experiment_name => { alternatives: ["Control Opt", "Alt one"] } } end before { Split.configuration.experiments = experiments } it "assigns default values to the experiment" do 
expect(Split::Experiment.new(experiment_name).resettable).to eq(true) end end end describe "persistent configuration" do it "should persist resettable in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.resettable).to be_falsey end describe "#metadata" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash, metadata: meta) } let(:meta) { { a: "b" } } before do experiment.save end it "should delete the key when metadata is removed" do experiment.metadata = nil experiment.save expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey end context "simple hash" do let(:meta) { { "basket" => "a", "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end context "nested hash" do let(:meta) { { "basket" => { "one" => "two" }, "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end end it "should persist algorithm in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.algorithm).to eq(Split::Algorithms::Whiplash) end it "should persist a new experiment in redis, that does not exist in the configuration file" do experiment = Split::Experiment.new("foobar", alternatives: ["tra", "la"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("foobar") expect(e).to eq(experiment) expect(e.alternatives.collect { |a| a.name }).to eq(["tra", "la"]) end end describe "deleting" do it 
"should delete itself" do experiment = Split::Experiment.new("basket_text", alternatives: [ "Basket", "Cart"]) experiment.save experiment.delete expect(Split.redis.exists?("link_color")).to be false expect(Split::ExperimentCatalog.find("link_color")).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.delete expect(experiment.version).to eq(1) end it "should call the on_experiment_delete hook" do expect(Split.configuration.on_experiment_delete).to receive(:call) experiment.delete end it "should call the on_before_experiment_delete hook" do expect(Split.configuration.on_before_experiment_delete).to receive(:call) experiment.delete end it "should reset the start time if the experiment should be manually started" do Split.configuration.start_manually = true experiment.start experiment.delete expect(experiment.start_time).to be_nil end it "should default cohorting back to false" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq(true) experiment.delete expect(experiment.cohorting_disabled?).to eq(false) end end describe "winner" do it "should have no winner initially" do expect(experiment.winner).to be_nil end end describe "winner=" do it "should allow you to specify a winner" do experiment.save experiment.winner = "red" expect(experiment.winner.name).to eq("red") end it "should call the on_experiment_winner_choose hook" do expect(Split.configuration.on_experiment_winner_choose).to receive(:call) experiment.winner = "green" end context "when has_winner state is memoized" do before { expect(experiment).to_not have_winner } it "should keep has_winner state consistent" do experiment.winner = "red" expect(experiment).to have_winner end end end describe "reset_winner" do before { experiment.winner = "green" } it "should reset the winner" do experiment.reset_winner expect(experiment.winner).to be_nil end context "when has_winner state is memoized" do before { expect(experiment).to have_winner } it 
"should keep has_winner state consistent" do experiment.reset_winner expect(experiment).to_not have_winner end end end describe "has_winner?" do context "with winner" do before { experiment.winner = "red" } it "returns true" do expect(experiment).to have_winner end end context "without winner" do it "returns false" do expect(experiment).to_not have_winner end end it "memoizes has_winner state" do expect(experiment).to receive(:winner).once expect(experiment).to_not have_winner expect(experiment).to_not have_winner end end describe "reset" do let(:reset_manually) { false } before do allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) experiment.save green.increment_participation green.increment_participation end it "should reset all alternatives" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end it "should reset the winner" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(experiment.winner).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.reset expect(experiment.version).to eq(1) end it "should call the on_experiment_reset hook" do expect(Split.configuration.on_experiment_reset).to receive(:call) experiment.reset end it "should call the on_before_experiment_reset hook" do expect(Split.configuration.on_before_experiment_reset).to receive(:call) experiment.reset end end describe "algorithm" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } it "should use the default algorithm if none is specified" do expect(experiment.algorithm).to eq(Split.configuration.algorithm) end it "should use the user specified algorithm for this experiment if specified" do experiment.algorithm = 
Split::Algorithms::Whiplash expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end end describe "#next_alternative" do context "with multiple alternatives" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } context "with winner" do it "should always return the winner" do green = Split::Alternative.new("green", "link_color") experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation expect(experiment.next_alternative.name).to eq("green") end end context "without winner" do it "should use the specified algorithm" do experiment.algorithm = Split::Algorithms::Whiplash expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new("green", "link_color")) expect(experiment.next_alternative.name).to eq("green") end end end context "with single alternative" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue") } it "should always return the only alternative" do expect(experiment.next_alternative.name).to eq("blue") expect(experiment.next_alternative.name).to eq("blue") end end end describe "#cohorting_disabled?" 
do it "returns false when nothing has been configured" do expect(experiment.cohorting_disabled?).to eq false end it "returns true when enable_cohorting is performed" do experiment.enable_cohorting expect(experiment.cohorting_disabled?).to eq false end it "returns false when nothing has been configured" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq true end end p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end end end it "should only reset once" do experiment.save expect(experiment.version).to eq(0) same_experiment = same_but_different_alternative expect(same_experiment.version).to eq(1) same_experiment_again = same_but_different_alternative expect(same_experiment_again.version).to eq(1) end context "when metadata is changed" do it "should increase version" do experiment.save experiment.metadata = { "foo" => "bar" } expect { experiment.save }.to change { experiment.version }.by(1) end it "does not increase version" do experiment.metadata = nil experiment.save expect { experiment.save }.to change { experiment.version }.by(0) end end context "when experiment configuration is changed" do let(:reset_manually) { false } before do experiment.save allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) green.increment_participation green.increment_participation experiment.set_alternatives_and_options(alternatives: %w(blue red green zip), goals: %w(purchase)) experiment.save end it "resets all alternatives" do expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end context "when reset_manually is set" do let(:reset_manually) { true } it "does not reset alternatives" do expect(green.participant_count).to eq(2) expect(green.completed_count).to eq(0) end end end end describe "alternatives passed as non-strings" do it "should throw an exception if an alternative is passed that is not a string" do expect { Split::ExperimentCatalog.find_or_create("link_color", 
:blue, :red) }.to raise_error(ArgumentError) expect { Split::ExperimentCatalog.find_or_create("link_enabled", true, false) }.to raise_error(ArgumentError) end end describe "specifying weights" do let(:experiment_with_weight) { Split::ExperimentCatalog.find_or_create("link_color", { "blue" => 1 }, { "red" => 2 }) } it "should work for a new experiment" do expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end it "should work for an existing experiment" do experiment.save expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end end describe "specifying goals" do let(:experiment) { new_experiment(["purchase"]) } context "saving experiment" do let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ "link_color" => ["purchase", "refund"] }, "blue", "red", "green") } before { experiment.save } it "can find existing experiment" do expect(Split::ExperimentCatalog.find("link_color").name).to eq("link_color") end it "should reset an experiment if it is loaded with different goals" do same_but_different_goals expect(Split::ExperimentCatalog.find("link_color").goals).to eq(["purchase", "refund"]) end end it "should have goals" do expect(experiment.goals).to eq(["purchase"]) end context "find or create experiment" do it "should have correct goals" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.goals).to eq(["purchase", "refund"]) experiment = Split::ExperimentCatalog.find_or_create("link_color3", "blue", "red", "green") expect(experiment.goals).to eq([]) end end end describe "beta probability calculation" do it "should return a hash with the probability of each alternative being the best" do experiment = Split::ExperimentCatalog.find_or_create("mathematicians", "bernoulli", "poisson", "lagrange") experiment.calc_winning_alternatives expect(experiment.alternative_probabilities).not_to be_nil end it "should return between 46% 
and 54% probability for an experiment with 2 alternatives and no data" do experiment = Split::ExperimentCatalog.find_or_create("scientists", "einstein", "bohr") experiment.calc_winning_alternatives expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50) end it "should calculate the probability of being the winning alternative separately for each goal", skip: true do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") goal1 = experiment.goals[0] goal2 = experiment.goals[1] experiment.alternatives.each do |alternative| alternative.participant_count = 50 alternative.set_completed_count(10, goal1) alternative.set_completed_count(15+rand(30), goal2) end experiment.calc_winning_alternatives alt = experiment.alternatives[0] p_goal1 = alt.p_winner(goal1) p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end it "should return nil and not re-calculate probabilities if they have already been calculated today" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.calc_winning_alternatives).not_to be nil expect(experiment.calc_winning_alternatives).to be nil end end end <MSG> Fix caching of winning alternative to prevent recalculation each time the dashboard is loaded It looks like when this feature was added, the calculation of winning alternatives was meant to take place only once per day. The #calc_winning_alternatives method was never called, which was meant to be saving the experiment's last calc_time. Update the experiment view to call this method instead of the #estimate_winning_alternative method directly. Fix caching so that the #calc_time= method is called, rather than assigning to a local variable. Update calc_time so that number of days since epoch is stored, rather than the day of month (1-31). 
Ensure we're comparing integer values, rather than the string value Redis returns from #hget. <DFF> @@ -428,6 +428,13 @@ describe Split::Experiment do p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end + + it "should return nil and not re-calculate probabilities if they have already been calculated today" do + experiment = Split::ExperimentCatalog.find_or_create({'link_color3' => ["purchase", "refund"]}, 'blue', 'red', 'green') + experiment_calc_time = Time.now.utc.to_i / 86400 + experiment.calc_time = experiment_calc_time + expect(experiment.calc_winning_alternatives).to be nil + end end end
7
Fix caching of winning alternative to prevent recalculation each time the dashboard is loaded
0
.rb
rb
mit
splitrb/split
10070796
<NME> _controls.erb <BEF> <% if experiment.has_winner? %> <form action="<%= url "/reopen?experiment=#{experiment.name}" %>" method='post' onclick="return confirmReopen()"> <input type="submit" value="Reopen Experiment"> </form> <% else %> <% if experiment.cohorting_disabled? %> <form action="<%= url "/update_cohorting?experiment=#{experiment.name}" %>" method='post' onclick="return confirmEnableCohorting()"> <input type="hidden" name="cohorting_action" value="enable"> <input type="submit" value="Enable Cohorting" class="green"> </form> <% else %> <input type="submit" value="Start"> </form> <% end %> <form action="<%= url "/?experiment=#{experiment.name}" %>" method='post' onclick="return confirmDelete()"> <input type="hidden" name="_method" value="delete"/> <input type="submit" value="Delete" class="red"> </form> <% if experiment.start_time %> <form action="<%= url "/reset?experiment=#{experiment.name}" %>" method='post' onclick="return confirmReset()"> <input type="submit" value="Reset Data"> </form> <% else%> <form action="<%= url "/start?experiment=#{experiment.name}" %>" method='post'> <input type="submit" value="Start"> </form> <% end %> <form action="<%= url "/experiment?experiment=#{experiment.name}" %>" method='post' onclick="return confirmDelete()"> <input type="hidden" name="_method" value="delete"/> <input type="submit" value="Delete" class="red"> </form> <MSG> Merge pull request #352 from craigmcnamara/fix-delete-path Whoops. Forgot to update the delete path. <DFF> @@ -12,7 +12,7 @@ <input type="submit" value="Start"> </form> <% end %> -<form action="<%= url "/?experiment=#{experiment.name}" %>" method='post' onclick="return confirmDelete()"> +<form action="<%= url "/experiment?experiment=#{experiment.name}" %>" method='post' onclick="return confirmDelete()"> <input type="hidden" name="_method" value="delete"/> <input type="submit" value="Delete" class="red"> </form>
1
Merge pull request #352 from craigmcnamara/fix-delete-path
1
.erb
erb
mit
splitrb/split
10070797
<NME> _controls.erb <BEF> <% if experiment.has_winner? %> <form action="<%= url "/reopen?experiment=#{experiment.name}" %>" method='post' onclick="return confirmReopen()"> <input type="submit" value="Reopen Experiment"> </form> <% else %> <% if experiment.cohorting_disabled? %> <form action="<%= url "/update_cohorting?experiment=#{experiment.name}" %>" method='post' onclick="return confirmEnableCohorting()"> <input type="hidden" name="cohorting_action" value="enable"> <input type="submit" value="Enable Cohorting" class="green"> </form> <% else %> <input type="submit" value="Start"> </form> <% end %> <form action="<%= url "/?experiment=#{experiment.name}" %>" method='post' onclick="return confirmDelete()"> <input type="hidden" name="_method" value="delete"/> <input type="submit" value="Delete" class="red"> </form> <% if experiment.start_time %> <form action="<%= url "/reset?experiment=#{experiment.name}" %>" method='post' onclick="return confirmReset()"> <input type="submit" value="Reset Data"> </form> <% else%> <form action="<%= url "/start?experiment=#{experiment.name}" %>" method='post'> <input type="submit" value="Start"> </form> <% end %> <form action="<%= url "/experiment?experiment=#{experiment.name}" %>" method='post' onclick="return confirmDelete()"> <input type="hidden" name="_method" value="delete"/> <input type="submit" value="Delete" class="red"> </form> <MSG> Merge pull request #352 from craigmcnamara/fix-delete-path Whoops. Forgot to update the delete path. <DFF> @@ -12,7 +12,7 @@ <input type="submit" value="Start"> </form> <% end %> -<form action="<%= url "/?experiment=#{experiment.name}" %>" method='post' onclick="return confirmDelete()"> +<form action="<%= url "/experiment?experiment=#{experiment.name}" %>" method='post' onclick="return confirmDelete()"> <input type="hidden" name="_method" value="delete"/> <input type="submit" value="Delete" class="red"> </form>
1
Merge pull request #352 from craigmcnamara/fix-delete-path
1
.erb
erb
mit
splitrb/split
10070798
<NME> _controls.erb <BEF> <% if experiment.has_winner? %> <form action="<%= url "/reopen?experiment=#{experiment.name}" %>" method='post' onclick="return confirmReopen()"> <input type="submit" value="Reopen Experiment"> </form> <% else %> <% if experiment.cohorting_disabled? %> <form action="<%= url "/update_cohorting?experiment=#{experiment.name}" %>" method='post' onclick="return confirmEnableCohorting()"> <input type="hidden" name="cohorting_action" value="enable"> <input type="submit" value="Enable Cohorting" class="green"> </form> <% else %> <input type="submit" value="Start"> </form> <% end %> <form action="<%= url "/?experiment=#{experiment.name}" %>" method='post' onclick="return confirmDelete()"> <input type="hidden" name="_method" value="delete"/> <input type="submit" value="Delete" class="red"> </form> <% if experiment.start_time %> <form action="<%= url "/reset?experiment=#{experiment.name}" %>" method='post' onclick="return confirmReset()"> <input type="submit" value="Reset Data"> </form> <% else%> <form action="<%= url "/start?experiment=#{experiment.name}" %>" method='post'> <input type="submit" value="Start"> </form> <% end %> <form action="<%= url "/experiment?experiment=#{experiment.name}" %>" method='post' onclick="return confirmDelete()"> <input type="hidden" name="_method" value="delete"/> <input type="submit" value="Delete" class="red"> </form> <MSG> Merge pull request #352 from craigmcnamara/fix-delete-path Whoops. Forgot to update the delete path. <DFF> @@ -12,7 +12,7 @@ <input type="submit" value="Start"> </form> <% end %> -<form action="<%= url "/?experiment=#{experiment.name}" %>" method='post' onclick="return confirmDelete()"> +<form action="<%= url "/experiment?experiment=#{experiment.name}" %>" method='post' onclick="return confirmDelete()"> <input type="hidden" name="_method" value="delete"/> <input type="submit" value="Delete" class="red"> </form>
1
Merge pull request #352 from craigmcnamara/fix-delete-path
1
.erb
erb
mit
splitrb/split
10070799
<NME> CHANGELOG.md <BEF> ## 3.4.0 (November 9th, 2019) Features: - Force experiment does not count for metrics (@andrehjr, #637) - Fix cleanup_old_versions! misbehaviour (@serggl, #661) Features: - Make goals accessible via on_trial_complete callbacks (@robin-phung, #625) - Replace usage of SimpleRandom with RubyStats(Used for Beta Distribution RNG) (@andrehjr, #616) - Introduce enable/disable experiment cohorting (@robin-phung, #615) - Add on_experiment_winner_choose callback (@GenaMinenkov, #574) - Add Split::Cache to reduce load on Redis (@rdh, #648) - Caching based optimization in the experiment#save path (@amangup, #652) - Adds config option for cookie domain (@joedelia, #664) Misc: - Drop support for Ruby < 2.5 (@andrehjr, #627) - Drop support for Rails < 5 (@andrehjr, #607) - Bump minimum required redis to 4.2 (@andrehjr, #628) - Removed repeated loading from config (@robin-phung, #619) - Simplify RedisInterface usage when persisting Experiment alternatives (@andrehjr, #632) - Remove redis_url impl. Deprecated on version 2.2 (@andrehjr, #631) - Remove thread_safe config as redis-rb is thread_safe by default (@andrehjr, #630) - Fix typo of in `Split::Trial` class variable (TomasBarry, #644) - Single HSET to update values, instead of multiple ones (@andrehjr, #640) - Use Redis#hmset to keep compatibility with Redis < 4.0 (@andrehjr, #659) - Remove 'set' parsing for alternatives. Sets were used as storage and deprecated on 0.x (@andrehjr, #639) - Adding documentation related to what is stored on cookies. 
(@andrehjr, #634) - Keep railtie defined under the Split gem namespace (@avit, #666) - Update RSpec helper to support block syntax (@clowder, #665) ## 3.4.1 (November 12th, 2019) Bugfixes: - Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602) ## 3.4.0 (November 9th, 2019) Features: - Improve DualAdapter (@santib, #588), adds a new configuration for the DualAdapter, making it possible to keep consistency for logged_out/logged_in users. It's a opt-in flag. No Behavior was changed on this release. - Make dashboard pagination default "per" param configurable (@alopatin, #597) Bugfixes: - Fix `force_alternative` for experiments with incremented version (@giraffate, #568) - Persist alternative weights (@giraffate, #570) - Combined experiment performance improvements (@gnanou, #575) - Handle correctly case when ab_finished is called before ab_test for a user (@gnanou, #577) - When loading active_experiments, it should not look into user's 'finished' keys (@andrehjr, #582) Misc: - Remove `rubyforge_project` from gemspec (@giraffate, #583) - Fix URLs to replace http with https (@giraffate , #584) - Lazily include split helpers in ActionController::Base (@hasghari, #586) - Fix unused variable warnings (@andrehjr, #592) - Fix ruby warnings (@andrehjr, #593) - Update rubocop.yml config (@andrehjr, #594) - Add frozen_string_literal to all files that were missing it (@andrehjr, #595) ## 3.3.2 (April 12th, 2019) Features: - Added uptime robot to configuration.rb (@razel1982, #556) - Check to see if being run in Rails application and run in before_initialize (@husteadrobert, #555) Bugfixes: - Fix error message interpolation (@hanibash, #553) - Fix Bigdecimal warnings (@agraves, #551) - Avoid hitting up on redis for robots/excluded users. (@andrehjr, #544) - Checks for defined?(request) on Helper#exclude_visitor?. 
(@andrehjr) Misc: - Update travis to add Rails 6 (@edmilton, #559) - Fix broken specs in developement environment (@dougpetronilio, #557) ## 3.3.1 (January 11th, 2019) Features: - Filter some more bots (@janosch-x, #542) Bugfixes: - Fix Dashboard Pagination Helper typo (@cattekin, #541) - Do not storage alternative in cookie if experiment has a winner (@sadhu89, #539) - fix user participating alternative not found (@NaturalHokke, #536) Misc: - Tweak RSpec instructions (@eliotsykes, #540) - Improve README regarding rspec usage (@vermaxik, #538) ## 3.3.0 (August 13th, 2018) Features: - Added pagination for dashboard (@GeorgeGorbanev, #518) - Add Facebot crawler to list of bots (@pfeiffer, #530) - Ignore previewing requests (@pfeiffer, #531) - Fix binding of ignore_filter (@pfeiffer, #533) Bugfixes: - Fix cookie header duplication (@andrehjr, #522) Performance: - Improve performance of RedisInterface#make_list_length by using LTRIM command (@mlovic, #509) Misc: - Update development dependencies - test rails 5.2 on travis (@lostapathy, #524) - update ruby versions for travis (@lostapathy, #525) ## 3.2.0 (September 21st, 2017) Features: - Allow configuration of how often winning alternatives are recalculated (@patbl, #501) Bugfixes: - Avoid z_score numeric exception for conversion rates >1 (@cmantas, #503) - Fix combined experiments (@semanticart, #502) ## 3.1.1 (August 30th, 2017) Bugfixes: - Bring back support for ruby 1.9.3 and greater (rubygems 2.0.0 or greater now required) (@patbl, #498) Misc: - Document testing with RSpec (@eliotsykes, #495) ## 3.1.0 (August 14th, 2017) Features: - Support for combined experiments (@daviddening, #493) - Rewrite CookieAdapter to work with Rack::Request and Rack::Response directly (@andrehjr, #490) - Enumeration of a User's Experiments that Respects the db_failover Option(@MarkRoddy, #487) Bugfixes: - Blocked a few more common bot user agents (@kylerippey, #485) Misc: - Repository Audit by Maintainer.io (@RichardLitt, #484) - 
Update development dependencies - Test on ruby 2.4.1 - Test compatibility with rails 5.1 - Add uris to metadata section in gemspec ## 3.0.0 (March 30th, 2017) Features: - added block randomization algorithm and specs (@hulleywood, #475) - Add ab_record_extra_info to allow record extra info to alternative and display on dashboard. (@tranngocsam, #460) Bugfixes: - Avoid crashing on Ruby 2.4 for numeric strings (@flori, #470) - Fix issue where redis isn't required (@tomciopp , #466) Misc: - Avoid variable_size_secure_compare private method (@eliotsykes, #465) ## 2.2.0 (November 11th, 2016) **Backwards incompatible!** Redis keys are renamed. Please make sure all running tests are completed before you upgrade, as they will reset. Features: - Remove dependency on Redis::Namespace (@bschaeffer, #425) - Make resetting on experiment change optional (@moggyboy, #430) - Add ability to force alternative on dashboard (@ccallebs, #437) Bugfixes: - Fix variations reset across page loads for multiple=control and improve coverage (@Vasfed, #432) Misc: - Remove Explicit Return (@BradHudson, #441) - Update Redis config docs (@bschaeffer, #422) - Harden HTTP Basic snippet against timing attacks (@eliotsykes, #443) - Removed a couple old ruby 1.8 hacks (@andrew, #456) - Run tests on rails 5 (@andrew, #457) - Fixed a few codeclimate warnings (@andrew, #458) - Use codeclimate for test coverage (@andrew #455) ## 2.1.0 (August 8th, 2016) Features: - Support REDIS_PROVIDER variable used in Heroku (@kartikluke, #426) ## 2.0.0 (July 17th, 2016) Breaking changes: - Removed deprecated `finished` and `begin_experiment` methods - Namespaced override param to avoid potential clashes (@henrik, #398) ## 1.7.0 (June 28th, 2016) Features: - Running concurrent experiments on same endpoint/view (@karmakaze, #421) ## 1.6.0 (June 16th, 2016) Features: - Add Dual Redis(logged-in)/cookie(logged-out) persistence adapter (@karmakaze, #420) ## 1.5.0 (June 8th, 2016) Features: - Add `expire_seconds:` TTL option 
to RedisAdapter (@karmakaze, #409) - Optional custom persistence adapter (@ndelage, #411) Misc: - Use fakeredis for testing (@andrew, #412) ## 1.4.5 (June 7th, 2016) Bugfixes: - FIX Negative numbers on non-finished (@divineforest, #408) - Eliminate extra RedisAdapter hget (@karmakaze, #407) - Remove unecessary code from Experiment class (@pakallis, #391, #392, #393) Misc: - Simplify Configuration#normalized_experiments (@pakallis, #395) - Clarify test running instructions (@henrik, #397) ## 1.4.4 (May 9th, 2016) Bugfixes: - Increment participation if store override is true and no experiment key exists (@spheric, #380) Misc: - Deprecated `finished` method in favour of `ab_finished` (@andreibondarev, #389) - Added minimum version requirement to simple-random - Clarify finished with first option being a hash in Readme (@henrik, #382) - Refactoring the User abstraction (@andreibondarev, #384) ## 1.4.3 (April 28th, 2016) Features: - add on_trial callback whenever a trial is started (@mtyeh411, #375) Bugfixes: - Allow algorithm configuration at experiment level (@007sumit, #376) Misc: - only choose override if it exists as valid alternative (@spheric, #377) ## 1.4.2 (April 25th, 2016) Misc: - Deprecated some legacy methods (@andreibondarev, #374) ## 1.4.1 (April 21st, 2016) Bugfixes: - respect manual start configuration after an experiment has been deleted (@mtyeh411, #372) Misc: - Introduce goals collection to reduce complexity of Experiment#save (@pakallis, #365) - Revise specs according to http://betterspecs.org/ (@hkliya, #369) ## 1.4.0 (April 2nd, 2016) Features: - Added experiment filters to dashboard (@ccallebs, #363, #364) - Added Contributor Covenant Code of Conduct ## 1.3.2 (January 2nd, 2016) Bugfixes: - Fix deleting experiments in from the updated dashboard (@craigmcnamara, #352) ## 1.3.1 (January 1st, 2016) Bugfixes: - Fix the dashboard for experiments with ‘/‘ in the name. 
(@craigmcnamara, #349) ## 1.3.0 (October 20th, 2015) Features: - allow for custom redis_url different from ENV variable (@davidgrieser, #323) - add ability to change the length of the persistence cookie (@peterylai, #335) Bugfixes: - Rescue from Redis::BaseError instead of Redis::CannotConnectError (@nfm, #342) - Fix active experiments when experiment is on a later version (@ndrisso, #331) - Fix caching of winning alternative (@nfm, #329) Misc: - Remove duplication from Experiment#save (@pakallis, #333) - Remove unnecessary argument from Experiment#write_to_alternative (@t4deu, #332) ## 1.2.1 (May 17th, 2015) Features: - Handle redis DNS resolution failures gracefully (@fusion2004, #310) - Push metadata to ab_test block (@ekorneeff, #296) - Helper methods are now private when included in controllers (@ipoval, #303) Bugfixes: - Return an empty hash as metadata when Split is disabled (@tomasdundacek, #313) - Don't use capture helper from ActionView (@tomasdundacek, #312) Misc: - Remove body "max-width" from dashboard (@xicreative, #299) - fix private for class methods (@ipoval, #301) - minor memoization fix in spec (@ipoval, #304) - Minor documentation fixes (#295, #297, #305, #308) ## 1.2.0 (January 24th, 2015) Features: - Configure redis using environment variables if available (@saratovsource , #293) - Store metadata on experiment configuration (@dekz, #291) Bugfixes: - Revert the Trial#complete! public API to support noargs (@dekz, #292) ## 1.1.0 (January 9th, 2015) Changes: - Public class methods on `Split::Experiment` (e.g., `find_or_create`) have been moved to `Split::ExperimentCatalog`. 
Features: - Decouple trial from Split::Helper (@joshdover, #286) - Helper method for Active Experiments (@blahblahblah-, #273) Misc: - Use the new travis container based infrastructure for tests (@andrew, #280) ## 1.0.0 (October 12th, 2014) Changes: - Remove support for Ruby 1.8.7 and Rails 2.3 (@qpowell, #271) ## 0.8.0 (September 25th, 2014) Features: - Added new way to calculate the probability an alternative is the winner (@caser, #266, #251) - support multiple metrics per experiment (@stevenou, #260) Bugfixes: - Avoiding call to params in EncapsulatedHelper (@afn, #257) ## 0.7.3 (September 16th, 2014) Features: - Disable all split tests via a URL parameter (@hwartig, #263) Bugfixes: - Correctly escape experiment names on dashboard (@ecaron, #265) - Handle redis connection exception error properly (@andrew, #245) ## 0.7.2 (June 12th, 2014) Features: - Show metrics on the dashboard (@swrobel, #241) Bugfixes: - Avoid nil error with ExperimentCatalog when upgrading (@danielschwartz, #253) - [SECURITY ISSUE] Only allow known alternatives as query param overrides (@ankane, #255) ## 0.7.1 (March 20th, 2014) Features: - You can now reopen experiment from the dashboard (@mikezaby, #235) Misc: - Internal code tidy up (@IanVaughan, #238) ## 0.7.0 (December 26th, 2013) Features: - Significantly improved z-score algorithm (@caser ,#221) - Better sorting of Experiments on dashboard (@wadako111, #218) Bugfixes: - Fixed start button not being displayed in some cases (@vigosan, #219) Misc: - Experiment#initialize refactoring (@nberger, #224) - Extract ExperimentStore into a seperate class (@nberger, #225) ## 0.6.6 (October 15th, 2013) Features: - Sort experiments on Dashboard so "active" ones without a winner appear first (@swrobel, #204) - Starting tests manually (@duksis, #209) Bugfixes: - Only trigger completion callback with valid Trial (@segfaultAX, #208) - Fixed bug with `resettable` when using `normalize_experiments` (@jonashuckestein, #213) Misc: - Added more bots to 
filter list (@lbeder, #214, #215, #216) ## 0.6.5 (August 23, 2013) Features: - Added Redis adapter for persisting experiments across sessions (@fengb, #203) Misc: - Expand upon algorithms section in README (@swrobel, #200) ## 0.6.4 (August 8, 2013) Features: - Add hooks for experiment deletion and resetting (@craigmcnamara, #198) - Allow Split::Helper to be used outside of a controller (@nfm, #190) - Show current Rails/Rack Env in dashboard (@rceee, #187) Bugfixes: - Fix whiplash algorithm when using goals (@swrobel, #193) Misc: - Refactor dashboard js (@buddhamagnet) ## 0.6.3 (July 8, 2013) Features: - Add hooks for Trial#choose! and Trial#complete! (@bmarini, #176) Bugfixes: - Stores and parses Experiment's start_time as a UNIX integer (@joeroot, #177) ## 0.6.2 (June 6, 2013) Features: - Rails 2.3 compatibility (@bhcarpenter, #167) - Adding possibility to store overridden alternative (@duksis, #173) Misc: - Now testing against multiple versions of rails ## 0.6.1 (May 4, 2013) Bugfixes: - Use the specified algorithm for the experiment instead of the default (@woodhull, #165) Misc: - Ensure experiements are valid when configuring (@ashmckenzie, #159) - Allow arrays to be passed to ab_test (@fenelon, #156) ## 0.6.0 (April 4, 2013) Features: - Support for Ruby 2.0.0 (@phoet, #142) - Multiple Goals (@liujin, #109) - Ignoring IPs using Regular Expressions (@waynemoore, #119) - Added ability to add more bots to the default list (@themgt, #140) - Allow custom configuration of user blocking logic (@phoet , #148) Bugfixes: - Fixed regression in handling of config files (@iangreenleaf, #115) - Fixed completion rate increases for experiments users aren't participating in (@philnash, #67) - Handle exceptions from invalid JSON in cookies (@iangreenleaf, #126) Misc: - updated minimum json version requirement - Refactor Yaml Configuration (@rtwomey, #124) - Refactoring of Experiments (@iangreenleaf @tamird, #117 #118) - Added more known Bots, including Pingdom, Bing, YandexBot 
(@julesie, @zinkkrysty, @dimko) - Improved Readme (@iangreenleaf @phoet) ## 0.5.0 (January 28, 2013) Features: - Persistence Adapters: Cookies and Session (@patbenatar, #98) - Configure experiments from a hash (@iangreenleaf, #97) - Pluggable sampling algorithms (@woodhull, #105) Bugfixes: - Fixed negative number of non-finished rates (@philnash, #83) - Fixed behaviour of finished(:reset => false) (@philnash, #88) - Only take into consideration positive z-scores (@thomasmaas, #96) - Amended ab_test method to raise ArgumentError if passed integers or symbols as alternatives (@buddhamagnet, #81) ## 0.4.6 (October 28, 2012) Features: - General code quality improvements (@buddhamagnet, #79) Bugfixes: - Don't increment the experiment counter if user has finished (@dimko, #78) - Fixed an incorrect test (@jaywengrow, #74) ## 0.4.5 (August 30, 2012) Bugfixes: - Fixed header gradient in FF/Opera (@philnash, #69) - Fixed reseting of experiment in session (@apsoto, #43) ## 0.4.4 (August 9, 2012) Features: - Allow parameter overrides, even without Redis. (@bhcarpenter, #62) Bugfixes: - Fixes version number always increasing when alternatives are changed (@philnash, #63) - updated guard-rspec to version 1.2 ## 0.4.3 (July 8, 2012) Features: - redis failover now recovers from all redis-related exceptions ## 0.4.2 (June 1, 2012) Features: - Now works with v3.0 of redis gem Bugfixes: - Fixed redis failover on Rubinius ## 0.4.1 (April 6, 2012) Features: - Added configuration option to disable Split testing (@ilyakatz, #45) Bugfixes: - Fix weights for existing experiments (@andreas, #40) - Fixed dashboard range error (@andrew, #42) ## 0.4.0 (March 7, 2012) **IMPORTANT** If using ruby 1.8.x and weighted alternatives you should always pass the control alternative through as the second argument with any other alternatives as a third argument because the order of the hash is not preserved in ruby 1.8, ruby 1.9 users are not affected by this bug. 
Features: - Experiments now record when they were started (@vrish88, #35) - Old versions of experiments in sessions are now cleaned up - Avoid users participating in multiple experiments at once (#21) Bugfixes: - Overriding alternatives doesn't work for weighted alternatives (@layflags, #34) - confidence_level helper should handle tiny z-scores (#23) ## 0.3.3 (February 16, 2012) Bugfixes: - Fixed redis failover when a block was passed to ab_test (@layflags, #33) ## 0.3.2 (February 12, 2012) Features: - Handle redis errors gracefully (@layflags, #32) ## 0.3.1 (November 19, 2011) Features: - General code tidy up (@ryanlecompte, #22, @mocoso, #28) - Lazy loading data from Redis (@lautis, #25) Bugfixes: - Handle unstarted experiments (@mocoso, #27) - Relaxed Sinatra version requirement (@martinclu, #24) ## 0.3.0 (October 9, 2011) Features: - Redesigned dashboard (@mrappleton, #17) - Use atomic increments in redis for better concurrency (@lautis, #18) - Weighted alternatives Bugfixes: - Fix to allow overriding of experiments that aren't on version 1 ## 0.2.4 (July 18, 2011) Features: - Added option to finished to not reset the users session Bugfixes: - Only allow strings as alternatives, fixes strange errors when passing true/false or symbols ## 0.2.3 (June 26, 2011) Features: - Experiments can now be deleted from the dashboard - ab_test helper now accepts a block - Improved dashboard Bugfixes: - After resetting an experiment, existing users of that experiment will also be reset ## 0.2.2 (June 11, 2011) Features: - Updated redis-namespace requirement to 1.0.3 - Added a configuration object for changing options - Robot regex can now be changed via a configuration options - Added ability to ignore visits from specified IP addresses - Dashboard now shows percentage improvement of alternatives compared to the control - If the alternatives of an experiment are changed it resets the experiment and uses the new alternatives Bugfixes: - Saving an experiment multiple times no 
longer creates duplicate alternatives ## 0.2.1 (May 29, 2011) Bugfixes: - Convert legacy sets to lists to avoid exceptions during upgrades from 0.1.x ## 0.2.0 (May 29, 2011) Features: - Override an alternative via a url parameter - Experiments can now be reset from the dashboard - The first alternative is now considered the control - General dashboard usability improvements - Robots are ignored and given the control alternative Bugfixes: - Alternatives are now store in a list rather than a set to ensure consistent ordering - Fixed diving by zero errors ## 0.1.1 (May 18, 2011) Bugfixes: - More Robust conversion rate display on dashboard - Ensure `Split::Version` is available everywhere, fixed dashboard ## 0.1.0 (May 17, 2011) Initial Release <MSG> Add Changelog <DFF> @@ -1,3 +1,8 @@ +## 3.4.1 (November 12th, 2019) + +Bugfixes: +- Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602) + ## 3.4.0 (November 9th, 2019) Features:
5
Add Changelog
0
.md
md
mit
splitrb/split
10070800
<NME> CHANGELOG.md <BEF> ## 3.4.0 (November 9th, 2019) Features: - Force experiment does not count for metrics (@andrehjr, #637) - Fix cleanup_old_versions! misbehaviour (@serggl, #661) Features: - Make goals accessible via on_trial_complete callbacks (@robin-phung, #625) - Replace usage of SimpleRandom with RubyStats(Used for Beta Distribution RNG) (@andrehjr, #616) - Introduce enable/disable experiment cohorting (@robin-phung, #615) - Add on_experiment_winner_choose callback (@GenaMinenkov, #574) - Add Split::Cache to reduce load on Redis (@rdh, #648) - Caching based optimization in the experiment#save path (@amangup, #652) - Adds config option for cookie domain (@joedelia, #664) Misc: - Drop support for Ruby < 2.5 (@andrehjr, #627) - Drop support for Rails < 5 (@andrehjr, #607) - Bump minimum required redis to 4.2 (@andrehjr, #628) - Removed repeated loading from config (@robin-phung, #619) - Simplify RedisInterface usage when persisting Experiment alternatives (@andrehjr, #632) - Remove redis_url impl. Deprecated on version 2.2 (@andrehjr, #631) - Remove thread_safe config as redis-rb is thread_safe by default (@andrehjr, #630) - Fix typo of in `Split::Trial` class variable (TomasBarry, #644) - Single HSET to update values, instead of multiple ones (@andrehjr, #640) - Use Redis#hmset to keep compatibility with Redis < 4.0 (@andrehjr, #659) - Remove 'set' parsing for alternatives. Sets were used as storage and deprecated on 0.x (@andrehjr, #639) - Adding documentation related to what is stored on cookies. 
(@andrehjr, #634) - Keep railtie defined under the Split gem namespace (@avit, #666) - Update RSpec helper to support block syntax (@clowder, #665) ## 3.4.1 (November 12th, 2019) Bugfixes: - Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602) ## 3.4.0 (November 9th, 2019) Features: - Improve DualAdapter (@santib, #588), adds a new configuration for the DualAdapter, making it possible to keep consistency for logged_out/logged_in users. It's a opt-in flag. No Behavior was changed on this release. - Make dashboard pagination default "per" param configurable (@alopatin, #597) Bugfixes: - Fix `force_alternative` for experiments with incremented version (@giraffate, #568) - Persist alternative weights (@giraffate, #570) - Combined experiment performance improvements (@gnanou, #575) - Handle correctly case when ab_finished is called before ab_test for a user (@gnanou, #577) - When loading active_experiments, it should not look into user's 'finished' keys (@andrehjr, #582) Misc: - Remove `rubyforge_project` from gemspec (@giraffate, #583) - Fix URLs to replace http with https (@giraffate , #584) - Lazily include split helpers in ActionController::Base (@hasghari, #586) - Fix unused variable warnings (@andrehjr, #592) - Fix ruby warnings (@andrehjr, #593) - Update rubocop.yml config (@andrehjr, #594) - Add frozen_string_literal to all files that were missing it (@andrehjr, #595) ## 3.3.2 (April 12th, 2019) Features: - Added uptime robot to configuration.rb (@razel1982, #556) - Check to see if being run in Rails application and run in before_initialize (@husteadrobert, #555) Bugfixes: - Fix error message interpolation (@hanibash, #553) - Fix Bigdecimal warnings (@agraves, #551) - Avoid hitting up on redis for robots/excluded users. (@andrehjr, #544) - Checks for defined?(request) on Helper#exclude_visitor?. 
(@andrehjr) Misc: - Update travis to add Rails 6 (@edmilton, #559) - Fix broken specs in developement environment (@dougpetronilio, #557) ## 3.3.1 (January 11th, 2019) Features: - Filter some more bots (@janosch-x, #542) Bugfixes: - Fix Dashboard Pagination Helper typo (@cattekin, #541) - Do not storage alternative in cookie if experiment has a winner (@sadhu89, #539) - fix user participating alternative not found (@NaturalHokke, #536) Misc: - Tweak RSpec instructions (@eliotsykes, #540) - Improve README regarding rspec usage (@vermaxik, #538) ## 3.3.0 (August 13th, 2018) Features: - Added pagination for dashboard (@GeorgeGorbanev, #518) - Add Facebot crawler to list of bots (@pfeiffer, #530) - Ignore previewing requests (@pfeiffer, #531) - Fix binding of ignore_filter (@pfeiffer, #533) Bugfixes: - Fix cookie header duplication (@andrehjr, #522) Performance: - Improve performance of RedisInterface#make_list_length by using LTRIM command (@mlovic, #509) Misc: - Update development dependencies - test rails 5.2 on travis (@lostapathy, #524) - update ruby versions for travis (@lostapathy, #525) ## 3.2.0 (September 21st, 2017) Features: - Allow configuration of how often winning alternatives are recalculated (@patbl, #501) Bugfixes: - Avoid z_score numeric exception for conversion rates >1 (@cmantas, #503) - Fix combined experiments (@semanticart, #502) ## 3.1.1 (August 30th, 2017) Bugfixes: - Bring back support for ruby 1.9.3 and greater (rubygems 2.0.0 or greater now required) (@patbl, #498) Misc: - Document testing with RSpec (@eliotsykes, #495) ## 3.1.0 (August 14th, 2017) Features: - Support for combined experiments (@daviddening, #493) - Rewrite CookieAdapter to work with Rack::Request and Rack::Response directly (@andrehjr, #490) - Enumeration of a User's Experiments that Respects the db_failover Option(@MarkRoddy, #487) Bugfixes: - Blocked a few more common bot user agents (@kylerippey, #485) Misc: - Repository Audit by Maintainer.io (@RichardLitt, #484) - 
Update development dependencies - Test on ruby 2.4.1 - Test compatibility with rails 5.1 - Add uris to metadata section in gemspec ## 3.0.0 (March 30th, 2017) Features: - added block randomization algorithm and specs (@hulleywood, #475) - Add ab_record_extra_info to allow record extra info to alternative and display on dashboard. (@tranngocsam, #460) Bugfixes: - Avoid crashing on Ruby 2.4 for numeric strings (@flori, #470) - Fix issue where redis isn't required (@tomciopp , #466) Misc: - Avoid variable_size_secure_compare private method (@eliotsykes, #465) ## 2.2.0 (November 11th, 2016) **Backwards incompatible!** Redis keys are renamed. Please make sure all running tests are completed before you upgrade, as they will reset. Features: - Remove dependency on Redis::Namespace (@bschaeffer, #425) - Make resetting on experiment change optional (@moggyboy, #430) - Add ability to force alternative on dashboard (@ccallebs, #437) Bugfixes: - Fix variations reset across page loads for multiple=control and improve coverage (@Vasfed, #432) Misc: - Remove Explicit Return (@BradHudson, #441) - Update Redis config docs (@bschaeffer, #422) - Harden HTTP Basic snippet against timing attacks (@eliotsykes, #443) - Removed a couple old ruby 1.8 hacks (@andrew, #456) - Run tests on rails 5 (@andrew, #457) - Fixed a few codeclimate warnings (@andrew, #458) - Use codeclimate for test coverage (@andrew #455) ## 2.1.0 (August 8th, 2016) Features: - Support REDIS_PROVIDER variable used in Heroku (@kartikluke, #426) ## 2.0.0 (July 17th, 2016) Breaking changes: - Removed deprecated `finished` and `begin_experiment` methods - Namespaced override param to avoid potential clashes (@henrik, #398) ## 1.7.0 (June 28th, 2016) Features: - Running concurrent experiments on same endpoint/view (@karmakaze, #421) ## 1.6.0 (June 16th, 2016) Features: - Add Dual Redis(logged-in)/cookie(logged-out) persistence adapter (@karmakaze, #420) ## 1.5.0 (June 8th, 2016) Features: - Add `expire_seconds:` TTL option 
to RedisAdapter (@karmakaze, #409) - Optional custom persistence adapter (@ndelage, #411) Misc: - Use fakeredis for testing (@andrew, #412) ## 1.4.5 (June 7th, 2016) Bugfixes: - FIX Negative numbers on non-finished (@divineforest, #408) - Eliminate extra RedisAdapter hget (@karmakaze, #407) - Remove unecessary code from Experiment class (@pakallis, #391, #392, #393) Misc: - Simplify Configuration#normalized_experiments (@pakallis, #395) - Clarify test running instructions (@henrik, #397) ## 1.4.4 (May 9th, 2016) Bugfixes: - Increment participation if store override is true and no experiment key exists (@spheric, #380) Misc: - Deprecated `finished` method in favour of `ab_finished` (@andreibondarev, #389) - Added minimum version requirement to simple-random - Clarify finished with first option being a hash in Readme (@henrik, #382) - Refactoring the User abstraction (@andreibondarev, #384) ## 1.4.3 (April 28th, 2016) Features: - add on_trial callback whenever a trial is started (@mtyeh411, #375) Bugfixes: - Allow algorithm configuration at experiment level (@007sumit, #376) Misc: - only choose override if it exists as valid alternative (@spheric, #377) ## 1.4.2 (April 25th, 2016) Misc: - Deprecated some legacy methods (@andreibondarev, #374) ## 1.4.1 (April 21st, 2016) Bugfixes: - respect manual start configuration after an experiment has been deleted (@mtyeh411, #372) Misc: - Introduce goals collection to reduce complexity of Experiment#save (@pakallis, #365) - Revise specs according to http://betterspecs.org/ (@hkliya, #369) ## 1.4.0 (April 2nd, 2016) Features: - Added experiment filters to dashboard (@ccallebs, #363, #364) - Added Contributor Covenant Code of Conduct ## 1.3.2 (January 2nd, 2016) Bugfixes: - Fix deleting experiments in from the updated dashboard (@craigmcnamara, #352) ## 1.3.1 (January 1st, 2016) Bugfixes: - Fix the dashboard for experiments with ‘/‘ in the name. 
(@craigmcnamara, #349) ## 1.3.0 (October 20th, 2015) Features: - allow for custom redis_url different from ENV variable (@davidgrieser, #323) - add ability to change the length of the persistence cookie (@peterylai, #335) Bugfixes: - Rescue from Redis::BaseError instead of Redis::CannotConnectError (@nfm, #342) - Fix active experiments when experiment is on a later version (@ndrisso, #331) - Fix caching of winning alternative (@nfm, #329) Misc: - Remove duplication from Experiment#save (@pakallis, #333) - Remove unnecessary argument from Experiment#write_to_alternative (@t4deu, #332) ## 1.2.1 (May 17th, 2015) Features: - Handle redis DNS resolution failures gracefully (@fusion2004, #310) - Push metadata to ab_test block (@ekorneeff, #296) - Helper methods are now private when included in controllers (@ipoval, #303) Bugfixes: - Return an empty hash as metadata when Split is disabled (@tomasdundacek, #313) - Don't use capture helper from ActionView (@tomasdundacek, #312) Misc: - Remove body "max-width" from dashboard (@xicreative, #299) - fix private for class methods (@ipoval, #301) - minor memoization fix in spec (@ipoval, #304) - Minor documentation fixes (#295, #297, #305, #308) ## 1.2.0 (January 24th, 2015) Features: - Configure redis using environment variables if available (@saratovsource , #293) - Store metadata on experiment configuration (@dekz, #291) Bugfixes: - Revert the Trial#complete! public API to support noargs (@dekz, #292) ## 1.1.0 (January 9th, 2015) Changes: - Public class methods on `Split::Experiment` (e.g., `find_or_create`) have been moved to `Split::ExperimentCatalog`. 
Features: - Decouple trial from Split::Helper (@joshdover, #286) - Helper method for Active Experiments (@blahblahblah-, #273) Misc: - Use the new travis container based infrastructure for tests (@andrew, #280) ## 1.0.0 (October 12th, 2014) Changes: - Remove support for Ruby 1.8.7 and Rails 2.3 (@qpowell, #271) ## 0.8.0 (September 25th, 2014) Features: - Added new way to calculate the probability an alternative is the winner (@caser, #266, #251) - support multiple metrics per experiment (@stevenou, #260) Bugfixes: - Avoiding call to params in EncapsulatedHelper (@afn, #257) ## 0.7.3 (September 16th, 2014) Features: - Disable all split tests via a URL parameter (@hwartig, #263) Bugfixes: - Correctly escape experiment names on dashboard (@ecaron, #265) - Handle redis connection exception error properly (@andrew, #245) ## 0.7.2 (June 12th, 2014) Features: - Show metrics on the dashboard (@swrobel, #241) Bugfixes: - Avoid nil error with ExperimentCatalog when upgrading (@danielschwartz, #253) - [SECURITY ISSUE] Only allow known alternatives as query param overrides (@ankane, #255) ## 0.7.1 (March 20th, 2014) Features: - You can now reopen experiment from the dashboard (@mikezaby, #235) Misc: - Internal code tidy up (@IanVaughan, #238) ## 0.7.0 (December 26th, 2013) Features: - Significantly improved z-score algorithm (@caser ,#221) - Better sorting of Experiments on dashboard (@wadako111, #218) Bugfixes: - Fixed start button not being displayed in some cases (@vigosan, #219) Misc: - Experiment#initialize refactoring (@nberger, #224) - Extract ExperimentStore into a seperate class (@nberger, #225) ## 0.6.6 (October 15th, 2013) Features: - Sort experiments on Dashboard so "active" ones without a winner appear first (@swrobel, #204) - Starting tests manually (@duksis, #209) Bugfixes: - Only trigger completion callback with valid Trial (@segfaultAX, #208) - Fixed bug with `resettable` when using `normalize_experiments` (@jonashuckestein, #213) Misc: - Added more bots to 
filter list (@lbeder, #214, #215, #216) ## 0.6.5 (August 23, 2013) Features: - Added Redis adapter for persisting experiments across sessions (@fengb, #203) Misc: - Expand upon algorithms section in README (@swrobel, #200) ## 0.6.4 (August 8, 2013) Features: - Add hooks for experiment deletion and resetting (@craigmcnamara, #198) - Allow Split::Helper to be used outside of a controller (@nfm, #190) - Show current Rails/Rack Env in dashboard (@rceee, #187) Bugfixes: - Fix whiplash algorithm when using goals (@swrobel, #193) Misc: - Refactor dashboard js (@buddhamagnet) ## 0.6.3 (July 8, 2013) Features: - Add hooks for Trial#choose! and Trial#complete! (@bmarini, #176) Bugfixes: - Stores and parses Experiment's start_time as a UNIX integer (@joeroot, #177) ## 0.6.2 (June 6, 2013) Features: - Rails 2.3 compatibility (@bhcarpenter, #167) - Adding possibility to store overridden alternative (@duksis, #173) Misc: - Now testing against multiple versions of rails ## 0.6.1 (May 4, 2013) Bugfixes: - Use the specified algorithm for the experiment instead of the default (@woodhull, #165) Misc: - Ensure experiements are valid when configuring (@ashmckenzie, #159) - Allow arrays to be passed to ab_test (@fenelon, #156) ## 0.6.0 (April 4, 2013) Features: - Support for Ruby 2.0.0 (@phoet, #142) - Multiple Goals (@liujin, #109) - Ignoring IPs using Regular Expressions (@waynemoore, #119) - Added ability to add more bots to the default list (@themgt, #140) - Allow custom configuration of user blocking logic (@phoet , #148) Bugfixes: - Fixed regression in handling of config files (@iangreenleaf, #115) - Fixed completion rate increases for experiments users aren't participating in (@philnash, #67) - Handle exceptions from invalid JSON in cookies (@iangreenleaf, #126) Misc: - updated minimum json version requirement - Refactor Yaml Configuration (@rtwomey, #124) - Refactoring of Experiments (@iangreenleaf @tamird, #117 #118) - Added more known Bots, including Pingdom, Bing, YandexBot 
(@julesie, @zinkkrysty, @dimko) - Improved Readme (@iangreenleaf @phoet) ## 0.5.0 (January 28, 2013) Features: - Persistence Adapters: Cookies and Session (@patbenatar, #98) - Configure experiments from a hash (@iangreenleaf, #97) - Pluggable sampling algorithms (@woodhull, #105) Bugfixes: - Fixed negative number of non-finished rates (@philnash, #83) - Fixed behaviour of finished(:reset => false) (@philnash, #88) - Only take into consideration positive z-scores (@thomasmaas, #96) - Amended ab_test method to raise ArgumentError if passed integers or symbols as alternatives (@buddhamagnet, #81) ## 0.4.6 (October 28, 2012) Features: - General code quality improvements (@buddhamagnet, #79) Bugfixes: - Don't increment the experiment counter if user has finished (@dimko, #78) - Fixed an incorrect test (@jaywengrow, #74) ## 0.4.5 (August 30, 2012) Bugfixes: - Fixed header gradient in FF/Opera (@philnash, #69) - Fixed reseting of experiment in session (@apsoto, #43) ## 0.4.4 (August 9, 2012) Features: - Allow parameter overrides, even without Redis. (@bhcarpenter, #62) Bugfixes: - Fixes version number always increasing when alternatives are changed (@philnash, #63) - updated guard-rspec to version 1.2 ## 0.4.3 (July 8, 2012) Features: - redis failover now recovers from all redis-related exceptions ## 0.4.2 (June 1, 2012) Features: - Now works with v3.0 of redis gem Bugfixes: - Fixed redis failover on Rubinius ## 0.4.1 (April 6, 2012) Features: - Added configuration option to disable Split testing (@ilyakatz, #45) Bugfixes: - Fix weights for existing experiments (@andreas, #40) - Fixed dashboard range error (@andrew, #42) ## 0.4.0 (March 7, 2012) **IMPORTANT** If using ruby 1.8.x and weighted alternatives you should always pass the control alternative through as the second argument with any other alternatives as a third argument because the order of the hash is not preserved in ruby 1.8, ruby 1.9 users are not affected by this bug. 
Features: - Experiments now record when they were started (@vrish88, #35) - Old versions of experiments in sessions are now cleaned up - Avoid users participating in multiple experiments at once (#21) Bugfixes: - Overriding alternatives doesn't work for weighted alternatives (@layflags, #34) - confidence_level helper should handle tiny z-scores (#23) ## 0.3.3 (February 16, 2012) Bugfixes: - Fixed redis failover when a block was passed to ab_test (@layflags, #33) ## 0.3.2 (February 12, 2012) Features: - Handle redis errors gracefully (@layflags, #32) ## 0.3.1 (November 19, 2011) Features: - General code tidy up (@ryanlecompte, #22, @mocoso, #28) - Lazy loading data from Redis (@lautis, #25) Bugfixes: - Handle unstarted experiments (@mocoso, #27) - Relaxed Sinatra version requirement (@martinclu, #24) ## 0.3.0 (October 9, 2011) Features: - Redesigned dashboard (@mrappleton, #17) - Use atomic increments in redis for better concurrency (@lautis, #18) - Weighted alternatives Bugfixes: - Fix to allow overriding of experiments that aren't on version 1 ## 0.2.4 (July 18, 2011) Features: - Added option to finished to not reset the users session Bugfixes: - Only allow strings as alternatives, fixes strange errors when passing true/false or symbols ## 0.2.3 (June 26, 2011) Features: - Experiments can now be deleted from the dashboard - ab_test helper now accepts a block - Improved dashboard Bugfixes: - After resetting an experiment, existing users of that experiment will also be reset ## 0.2.2 (June 11, 2011) Features: - Updated redis-namespace requirement to 1.0.3 - Added a configuration object for changing options - Robot regex can now be changed via a configuration options - Added ability to ignore visits from specified IP addresses - Dashboard now shows percentage improvement of alternatives compared to the control - If the alternatives of an experiment are changed it resets the experiment and uses the new alternatives Bugfixes: - Saving an experiment multiple times no 
longer creates duplicate alternatives ## 0.2.1 (May 29, 2011) Bugfixes: - Convert legacy sets to lists to avoid exceptions during upgrades from 0.1.x ## 0.2.0 (May 29, 2011) Features: - Override an alternative via a url parameter - Experiments can now be reset from the dashboard - The first alternative is now considered the control - General dashboard usability improvements - Robots are ignored and given the control alternative Bugfixes: - Alternatives are now store in a list rather than a set to ensure consistent ordering - Fixed diving by zero errors ## 0.1.1 (May 18, 2011) Bugfixes: - More Robust conversion rate display on dashboard - Ensure `Split::Version` is available everywhere, fixed dashboard ## 0.1.0 (May 17, 2011) Initial Release <MSG> Add Changelog <DFF> @@ -1,3 +1,8 @@ +## 3.4.1 (November 12th, 2019) + +Bugfixes: +- Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602) + ## 3.4.0 (November 9th, 2019) Features:
5
Add Changelog
0
.md
md
mit
splitrb/split
10070801
<NME> CHANGELOG.md <BEF> ## 3.4.0 (November 9th, 2019) Features: - Force experiment does not count for metrics (@andrehjr, #637) - Fix cleanup_old_versions! misbehaviour (@serggl, #661) Features: - Make goals accessible via on_trial_complete callbacks (@robin-phung, #625) - Replace usage of SimpleRandom with RubyStats(Used for Beta Distribution RNG) (@andrehjr, #616) - Introduce enable/disable experiment cohorting (@robin-phung, #615) - Add on_experiment_winner_choose callback (@GenaMinenkov, #574) - Add Split::Cache to reduce load on Redis (@rdh, #648) - Caching based optimization in the experiment#save path (@amangup, #652) - Adds config option for cookie domain (@joedelia, #664) Misc: - Drop support for Ruby < 2.5 (@andrehjr, #627) - Drop support for Rails < 5 (@andrehjr, #607) - Bump minimum required redis to 4.2 (@andrehjr, #628) - Removed repeated loading from config (@robin-phung, #619) - Simplify RedisInterface usage when persisting Experiment alternatives (@andrehjr, #632) - Remove redis_url impl. Deprecated on version 2.2 (@andrehjr, #631) - Remove thread_safe config as redis-rb is thread_safe by default (@andrehjr, #630) - Fix typo of in `Split::Trial` class variable (TomasBarry, #644) - Single HSET to update values, instead of multiple ones (@andrehjr, #640) - Use Redis#hmset to keep compatibility with Redis < 4.0 (@andrehjr, #659) - Remove 'set' parsing for alternatives. Sets were used as storage and deprecated on 0.x (@andrehjr, #639) - Adding documentation related to what is stored on cookies. 
(@andrehjr, #634) - Keep railtie defined under the Split gem namespace (@avit, #666) - Update RSpec helper to support block syntax (@clowder, #665) ## 3.4.1 (November 12th, 2019) Bugfixes: - Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602) ## 3.4.0 (November 9th, 2019) Features: - Improve DualAdapter (@santib, #588), adds a new configuration for the DualAdapter, making it possible to keep consistency for logged_out/logged_in users. It's a opt-in flag. No Behavior was changed on this release. - Make dashboard pagination default "per" param configurable (@alopatin, #597) Bugfixes: - Fix `force_alternative` for experiments with incremented version (@giraffate, #568) - Persist alternative weights (@giraffate, #570) - Combined experiment performance improvements (@gnanou, #575) - Handle correctly case when ab_finished is called before ab_test for a user (@gnanou, #577) - When loading active_experiments, it should not look into user's 'finished' keys (@andrehjr, #582) Misc: - Remove `rubyforge_project` from gemspec (@giraffate, #583) - Fix URLs to replace http with https (@giraffate , #584) - Lazily include split helpers in ActionController::Base (@hasghari, #586) - Fix unused variable warnings (@andrehjr, #592) - Fix ruby warnings (@andrehjr, #593) - Update rubocop.yml config (@andrehjr, #594) - Add frozen_string_literal to all files that were missing it (@andrehjr, #595) ## 3.3.2 (April 12th, 2019) Features: - Added uptime robot to configuration.rb (@razel1982, #556) - Check to see if being run in Rails application and run in before_initialize (@husteadrobert, #555) Bugfixes: - Fix error message interpolation (@hanibash, #553) - Fix Bigdecimal warnings (@agraves, #551) - Avoid hitting up on redis for robots/excluded users. (@andrehjr, #544) - Checks for defined?(request) on Helper#exclude_visitor?. 
(@andrehjr) Misc: - Update travis to add Rails 6 (@edmilton, #559) - Fix broken specs in developement environment (@dougpetronilio, #557) ## 3.3.1 (January 11th, 2019) Features: - Filter some more bots (@janosch-x, #542) Bugfixes: - Fix Dashboard Pagination Helper typo (@cattekin, #541) - Do not storage alternative in cookie if experiment has a winner (@sadhu89, #539) - fix user participating alternative not found (@NaturalHokke, #536) Misc: - Tweak RSpec instructions (@eliotsykes, #540) - Improve README regarding rspec usage (@vermaxik, #538) ## 3.3.0 (August 13th, 2018) Features: - Added pagination for dashboard (@GeorgeGorbanev, #518) - Add Facebot crawler to list of bots (@pfeiffer, #530) - Ignore previewing requests (@pfeiffer, #531) - Fix binding of ignore_filter (@pfeiffer, #533) Bugfixes: - Fix cookie header duplication (@andrehjr, #522) Performance: - Improve performance of RedisInterface#make_list_length by using LTRIM command (@mlovic, #509) Misc: - Update development dependencies - test rails 5.2 on travis (@lostapathy, #524) - update ruby versions for travis (@lostapathy, #525) ## 3.2.0 (September 21st, 2017) Features: - Allow configuration of how often winning alternatives are recalculated (@patbl, #501) Bugfixes: - Avoid z_score numeric exception for conversion rates >1 (@cmantas, #503) - Fix combined experiments (@semanticart, #502) ## 3.1.1 (August 30th, 2017) Bugfixes: - Bring back support for ruby 1.9.3 and greater (rubygems 2.0.0 or greater now required) (@patbl, #498) Misc: - Document testing with RSpec (@eliotsykes, #495) ## 3.1.0 (August 14th, 2017) Features: - Support for combined experiments (@daviddening, #493) - Rewrite CookieAdapter to work with Rack::Request and Rack::Response directly (@andrehjr, #490) - Enumeration of a User's Experiments that Respects the db_failover Option(@MarkRoddy, #487) Bugfixes: - Blocked a few more common bot user agents (@kylerippey, #485) Misc: - Repository Audit by Maintainer.io (@RichardLitt, #484) - 
Update development dependencies - Test on ruby 2.4.1 - Test compatibility with rails 5.1 - Add uris to metadata section in gemspec ## 3.0.0 (March 30th, 2017) Features: - added block randomization algorithm and specs (@hulleywood, #475) - Add ab_record_extra_info to allow record extra info to alternative and display on dashboard. (@tranngocsam, #460) Bugfixes: - Avoid crashing on Ruby 2.4 for numeric strings (@flori, #470) - Fix issue where redis isn't required (@tomciopp , #466) Misc: - Avoid variable_size_secure_compare private method (@eliotsykes, #465) ## 2.2.0 (November 11th, 2016) **Backwards incompatible!** Redis keys are renamed. Please make sure all running tests are completed before you upgrade, as they will reset. Features: - Remove dependency on Redis::Namespace (@bschaeffer, #425) - Make resetting on experiment change optional (@moggyboy, #430) - Add ability to force alternative on dashboard (@ccallebs, #437) Bugfixes: - Fix variations reset across page loads for multiple=control and improve coverage (@Vasfed, #432) Misc: - Remove Explicit Return (@BradHudson, #441) - Update Redis config docs (@bschaeffer, #422) - Harden HTTP Basic snippet against timing attacks (@eliotsykes, #443) - Removed a couple old ruby 1.8 hacks (@andrew, #456) - Run tests on rails 5 (@andrew, #457) - Fixed a few codeclimate warnings (@andrew, #458) - Use codeclimate for test coverage (@andrew #455) ## 2.1.0 (August 8th, 2016) Features: - Support REDIS_PROVIDER variable used in Heroku (@kartikluke, #426) ## 2.0.0 (July 17th, 2016) Breaking changes: - Removed deprecated `finished` and `begin_experiment` methods - Namespaced override param to avoid potential clashes (@henrik, #398) ## 1.7.0 (June 28th, 2016) Features: - Running concurrent experiments on same endpoint/view (@karmakaze, #421) ## 1.6.0 (June 16th, 2016) Features: - Add Dual Redis(logged-in)/cookie(logged-out) persistence adapter (@karmakaze, #420) ## 1.5.0 (June 8th, 2016) Features: - Add `expire_seconds:` TTL option 
to RedisAdapter (@karmakaze, #409) - Optional custom persistence adapter (@ndelage, #411) Misc: - Use fakeredis for testing (@andrew, #412) ## 1.4.5 (June 7th, 2016) Bugfixes: - FIX Negative numbers on non-finished (@divineforest, #408) - Eliminate extra RedisAdapter hget (@karmakaze, #407) - Remove unecessary code from Experiment class (@pakallis, #391, #392, #393) Misc: - Simplify Configuration#normalized_experiments (@pakallis, #395) - Clarify test running instructions (@henrik, #397) ## 1.4.4 (May 9th, 2016) Bugfixes: - Increment participation if store override is true and no experiment key exists (@spheric, #380) Misc: - Deprecated `finished` method in favour of `ab_finished` (@andreibondarev, #389) - Added minimum version requirement to simple-random - Clarify finished with first option being a hash in Readme (@henrik, #382) - Refactoring the User abstraction (@andreibondarev, #384) ## 1.4.3 (April 28th, 2016) Features: - add on_trial callback whenever a trial is started (@mtyeh411, #375) Bugfixes: - Allow algorithm configuration at experiment level (@007sumit, #376) Misc: - only choose override if it exists as valid alternative (@spheric, #377) ## 1.4.2 (April 25th, 2016) Misc: - Deprecated some legacy methods (@andreibondarev, #374) ## 1.4.1 (April 21st, 2016) Bugfixes: - respect manual start configuration after an experiment has been deleted (@mtyeh411, #372) Misc: - Introduce goals collection to reduce complexity of Experiment#save (@pakallis, #365) - Revise specs according to http://betterspecs.org/ (@hkliya, #369) ## 1.4.0 (April 2nd, 2016) Features: - Added experiment filters to dashboard (@ccallebs, #363, #364) - Added Contributor Covenant Code of Conduct ## 1.3.2 (January 2nd, 2016) Bugfixes: - Fix deleting experiments in from the updated dashboard (@craigmcnamara, #352) ## 1.3.1 (January 1st, 2016) Bugfixes: - Fix the dashboard for experiments with ‘/‘ in the name. 
(@craigmcnamara, #349) ## 1.3.0 (October 20th, 2015) Features: - allow for custom redis_url different from ENV variable (@davidgrieser, #323) - add ability to change the length of the persistence cookie (@peterylai, #335) Bugfixes: - Rescue from Redis::BaseError instead of Redis::CannotConnectError (@nfm, #342) - Fix active experiments when experiment is on a later version (@ndrisso, #331) - Fix caching of winning alternative (@nfm, #329) Misc: - Remove duplication from Experiment#save (@pakallis, #333) - Remove unnecessary argument from Experiment#write_to_alternative (@t4deu, #332) ## 1.2.1 (May 17th, 2015) Features: - Handle redis DNS resolution failures gracefully (@fusion2004, #310) - Push metadata to ab_test block (@ekorneeff, #296) - Helper methods are now private when included in controllers (@ipoval, #303) Bugfixes: - Return an empty hash as metadata when Split is disabled (@tomasdundacek, #313) - Don't use capture helper from ActionView (@tomasdundacek, #312) Misc: - Remove body "max-width" from dashboard (@xicreative, #299) - fix private for class methods (@ipoval, #301) - minor memoization fix in spec (@ipoval, #304) - Minor documentation fixes (#295, #297, #305, #308) ## 1.2.0 (January 24th, 2015) Features: - Configure redis using environment variables if available (@saratovsource , #293) - Store metadata on experiment configuration (@dekz, #291) Bugfixes: - Revert the Trial#complete! public API to support noargs (@dekz, #292) ## 1.1.0 (January 9th, 2015) Changes: - Public class methods on `Split::Experiment` (e.g., `find_or_create`) have been moved to `Split::ExperimentCatalog`. 
Features: - Decouple trial from Split::Helper (@joshdover, #286) - Helper method for Active Experiments (@blahblahblah-, #273) Misc: - Use the new travis container based infrastructure for tests (@andrew, #280) ## 1.0.0 (October 12th, 2014) Changes: - Remove support for Ruby 1.8.7 and Rails 2.3 (@qpowell, #271) ## 0.8.0 (September 25th, 2014) Features: - Added new way to calculate the probability an alternative is the winner (@caser, #266, #251) - support multiple metrics per experiment (@stevenou, #260) Bugfixes: - Avoiding call to params in EncapsulatedHelper (@afn, #257) ## 0.7.3 (September 16th, 2014) Features: - Disable all split tests via a URL parameter (@hwartig, #263) Bugfixes: - Correctly escape experiment names on dashboard (@ecaron, #265) - Handle redis connection exception error properly (@andrew, #245) ## 0.7.2 (June 12th, 2014) Features: - Show metrics on the dashboard (@swrobel, #241) Bugfixes: - Avoid nil error with ExperimentCatalog when upgrading (@danielschwartz, #253) - [SECURITY ISSUE] Only allow known alternatives as query param overrides (@ankane, #255) ## 0.7.1 (March 20th, 2014) Features: - You can now reopen experiment from the dashboard (@mikezaby, #235) Misc: - Internal code tidy up (@IanVaughan, #238) ## 0.7.0 (December 26th, 2013) Features: - Significantly improved z-score algorithm (@caser ,#221) - Better sorting of Experiments on dashboard (@wadako111, #218) Bugfixes: - Fixed start button not being displayed in some cases (@vigosan, #219) Misc: - Experiment#initialize refactoring (@nberger, #224) - Extract ExperimentStore into a seperate class (@nberger, #225) ## 0.6.6 (October 15th, 2013) Features: - Sort experiments on Dashboard so "active" ones without a winner appear first (@swrobel, #204) - Starting tests manually (@duksis, #209) Bugfixes: - Only trigger completion callback with valid Trial (@segfaultAX, #208) - Fixed bug with `resettable` when using `normalize_experiments` (@jonashuckestein, #213) Misc: - Added more bots to 
filter list (@lbeder, #214, #215, #216) ## 0.6.5 (August 23, 2013) Features: - Added Redis adapter for persisting experiments across sessions (@fengb, #203) Misc: - Expand upon algorithms section in README (@swrobel, #200) ## 0.6.4 (August 8, 2013) Features: - Add hooks for experiment deletion and resetting (@craigmcnamara, #198) - Allow Split::Helper to be used outside of a controller (@nfm, #190) - Show current Rails/Rack Env in dashboard (@rceee, #187) Bugfixes: - Fix whiplash algorithm when using goals (@swrobel, #193) Misc: - Refactor dashboard js (@buddhamagnet) ## 0.6.3 (July 8, 2013) Features: - Add hooks for Trial#choose! and Trial#complete! (@bmarini, #176) Bugfixes: - Stores and parses Experiment's start_time as a UNIX integer (@joeroot, #177) ## 0.6.2 (June 6, 2013) Features: - Rails 2.3 compatibility (@bhcarpenter, #167) - Adding possibility to store overridden alternative (@duksis, #173) Misc: - Now testing against multiple versions of rails ## 0.6.1 (May 4, 2013) Bugfixes: - Use the specified algorithm for the experiment instead of the default (@woodhull, #165) Misc: - Ensure experiements are valid when configuring (@ashmckenzie, #159) - Allow arrays to be passed to ab_test (@fenelon, #156) ## 0.6.0 (April 4, 2013) Features: - Support for Ruby 2.0.0 (@phoet, #142) - Multiple Goals (@liujin, #109) - Ignoring IPs using Regular Expressions (@waynemoore, #119) - Added ability to add more bots to the default list (@themgt, #140) - Allow custom configuration of user blocking logic (@phoet , #148) Bugfixes: - Fixed regression in handling of config files (@iangreenleaf, #115) - Fixed completion rate increases for experiments users aren't participating in (@philnash, #67) - Handle exceptions from invalid JSON in cookies (@iangreenleaf, #126) Misc: - updated minimum json version requirement - Refactor Yaml Configuration (@rtwomey, #124) - Refactoring of Experiments (@iangreenleaf @tamird, #117 #118) - Added more known Bots, including Pingdom, Bing, YandexBot 
(@julesie, @zinkkrysty, @dimko) - Improved Readme (@iangreenleaf @phoet) ## 0.5.0 (January 28, 2013) Features: - Persistence Adapters: Cookies and Session (@patbenatar, #98) - Configure experiments from a hash (@iangreenleaf, #97) - Pluggable sampling algorithms (@woodhull, #105) Bugfixes: - Fixed negative number of non-finished rates (@philnash, #83) - Fixed behaviour of finished(:reset => false) (@philnash, #88) - Only take into consideration positive z-scores (@thomasmaas, #96) - Amended ab_test method to raise ArgumentError if passed integers or symbols as alternatives (@buddhamagnet, #81) ## 0.4.6 (October 28, 2012) Features: - General code quality improvements (@buddhamagnet, #79) Bugfixes: - Don't increment the experiment counter if user has finished (@dimko, #78) - Fixed an incorrect test (@jaywengrow, #74) ## 0.4.5 (August 30, 2012) Bugfixes: - Fixed header gradient in FF/Opera (@philnash, #69) - Fixed reseting of experiment in session (@apsoto, #43) ## 0.4.4 (August 9, 2012) Features: - Allow parameter overrides, even without Redis. (@bhcarpenter, #62) Bugfixes: - Fixes version number always increasing when alternatives are changed (@philnash, #63) - updated guard-rspec to version 1.2 ## 0.4.3 (July 8, 2012) Features: - redis failover now recovers from all redis-related exceptions ## 0.4.2 (June 1, 2012) Features: - Now works with v3.0 of redis gem Bugfixes: - Fixed redis failover on Rubinius ## 0.4.1 (April 6, 2012) Features: - Added configuration option to disable Split testing (@ilyakatz, #45) Bugfixes: - Fix weights for existing experiments (@andreas, #40) - Fixed dashboard range error (@andrew, #42) ## 0.4.0 (March 7, 2012) **IMPORTANT** If using ruby 1.8.x and weighted alternatives you should always pass the control alternative through as the second argument with any other alternatives as a third argument because the order of the hash is not preserved in ruby 1.8, ruby 1.9 users are not affected by this bug. 
Features: - Experiments now record when they were started (@vrish88, #35) - Old versions of experiments in sessions are now cleaned up - Avoid users participating in multiple experiments at once (#21) Bugfixes: - Overriding alternatives doesn't work for weighted alternatives (@layflags, #34) - confidence_level helper should handle tiny z-scores (#23) ## 0.3.3 (February 16, 2012) Bugfixes: - Fixed redis failover when a block was passed to ab_test (@layflags, #33) ## 0.3.2 (February 12, 2012) Features: - Handle redis errors gracefully (@layflags, #32) ## 0.3.1 (November 19, 2011) Features: - General code tidy up (@ryanlecompte, #22, @mocoso, #28) - Lazy loading data from Redis (@lautis, #25) Bugfixes: - Handle unstarted experiments (@mocoso, #27) - Relaxed Sinatra version requirement (@martinclu, #24) ## 0.3.0 (October 9, 2011) Features: - Redesigned dashboard (@mrappleton, #17) - Use atomic increments in redis for better concurrency (@lautis, #18) - Weighted alternatives Bugfixes: - Fix to allow overriding of experiments that aren't on version 1 ## 0.2.4 (July 18, 2011) Features: - Added option to finished to not reset the users session Bugfixes: - Only allow strings as alternatives, fixes strange errors when passing true/false or symbols ## 0.2.3 (June 26, 2011) Features: - Experiments can now be deleted from the dashboard - ab_test helper now accepts a block - Improved dashboard Bugfixes: - After resetting an experiment, existing users of that experiment will also be reset ## 0.2.2 (June 11, 2011) Features: - Updated redis-namespace requirement to 1.0.3 - Added a configuration object for changing options - Robot regex can now be changed via a configuration options - Added ability to ignore visits from specified IP addresses - Dashboard now shows percentage improvement of alternatives compared to the control - If the alternatives of an experiment are changed it resets the experiment and uses the new alternatives Bugfixes: - Saving an experiment multiple times no 
longer creates duplicate alternatives ## 0.2.1 (May 29, 2011) Bugfixes: - Convert legacy sets to lists to avoid exceptions during upgrades from 0.1.x ## 0.2.0 (May 29, 2011) Features: - Override an alternative via a url parameter - Experiments can now be reset from the dashboard - The first alternative is now considered the control - General dashboard usability improvements - Robots are ignored and given the control alternative Bugfixes: - Alternatives are now store in a list rather than a set to ensure consistent ordering - Fixed diving by zero errors ## 0.1.1 (May 18, 2011) Bugfixes: - More Robust conversion rate display on dashboard - Ensure `Split::Version` is available everywhere, fixed dashboard ## 0.1.0 (May 17, 2011) Initial Release <MSG> Add Changelog <DFF> @@ -1,3 +1,8 @@ +## 3.4.1 (November 12th, 2019) + +Bugfixes: +- Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602) + ## 3.4.0 (November 9th, 2019) Features:
5
Add Changelog
0
.md
md
mit
splitrb/split
10070802
<NME> .gitignore <BEF> build node_modules <MSG> ignore foo.js <DFF> @@ -1,2 +1,3 @@ build node_modules +foo.js \ No newline at end of file
1
ignore foo.js
0
gitignore
bsd-2-clause
jlongster/transducers.js
10070803
<NME> split.gemspec <BEF> # -*- encoding: utf-8 -*- # frozen_string_literal: true $:.push File.expand_path("../lib", __FILE__) require "split/version" Gem::Specification.new do |s| s.name = "split" s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] s.licenses = ["MIT"] s.email = ["[email protected]"] s.homepage = "https://github.com/splitrb/split" s.summary = "Rack based split testing framework" s.metadata = { "homepage_uri" => "https://github.com/splitrb/split", "changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md", "source_code_uri" => "https://github.com/splitrb/split", s.require_paths = ["lib"] s.add_dependency 'redis', '>= 2.1' s.add_dependency 'redis-namespace', '>= 1.1.0' s.add_dependency 'sinatra', '>= 1.2.6' s.add_dependency 'simple-random', '>= 0.9.3' s.required_rubygems_version = ">= 2.0.0" s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.require_paths = ["lib"] s.add_dependency "redis", ">= 4.2" s.add_dependency "sinatra", ">= 1.2.6" s.add_dependency "rubystats", ">= 0.3.0" s.add_development_dependency "bundler", ">= 1.17" s.add_development_dependency "simplecov", "~> 0.15" s.add_development_dependency "rack-test", "~> 2.0" s.add_development_dependency "rake", "~> 13" s.add_development_dependency "rspec", "~> 3.7" s.add_development_dependency "pry", "~> 0.10" s.add_development_dependency "rails", ">= 5.0" end <MSG> Remove dependency on Redis::Namespace (#425) * Rename redis_url config to redis * Remove dependency on redis-namespace * Add backwards compatible redis_url with deprecation <DFF> @@ -21,7 +21,6 @@ Gem::Specification.new do |s| s.require_paths = ["lib"] s.add_dependency 'redis', '>= 2.1' - s.add_dependency 'redis-namespace', '>= 1.1.0' s.add_dependency 'sinatra', '>= 1.2.6' s.add_dependency 'simple-random', '>= 0.9.3'
0
Remove dependency on Redis::Namespace (#425)
1
.gemspec
gemspec
mit
splitrb/split
10070804
<NME> split.gemspec <BEF> # -*- encoding: utf-8 -*- # frozen_string_literal: true $:.push File.expand_path("../lib", __FILE__) require "split/version" Gem::Specification.new do |s| s.name = "split" s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] s.licenses = ["MIT"] s.email = ["[email protected]"] s.homepage = "https://github.com/splitrb/split" s.summary = "Rack based split testing framework" s.metadata = { "homepage_uri" => "https://github.com/splitrb/split", "changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md", "source_code_uri" => "https://github.com/splitrb/split", s.require_paths = ["lib"] s.add_dependency 'redis', '>= 2.1' s.add_dependency 'redis-namespace', '>= 1.1.0' s.add_dependency 'sinatra', '>= 1.2.6' s.add_dependency 'simple-random', '>= 0.9.3' s.required_rubygems_version = ">= 2.0.0" s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.require_paths = ["lib"] s.add_dependency "redis", ">= 4.2" s.add_dependency "sinatra", ">= 1.2.6" s.add_dependency "rubystats", ">= 0.3.0" s.add_development_dependency "bundler", ">= 1.17" s.add_development_dependency "simplecov", "~> 0.15" s.add_development_dependency "rack-test", "~> 2.0" s.add_development_dependency "rake", "~> 13" s.add_development_dependency "rspec", "~> 3.7" s.add_development_dependency "pry", "~> 0.10" s.add_development_dependency "rails", ">= 5.0" end <MSG> Remove dependency on Redis::Namespace (#425) * Rename redis_url config to redis * Remove dependency on redis-namespace * Add backwards compatible redis_url with deprecation <DFF> @@ -21,7 +21,6 @@ Gem::Specification.new do |s| s.require_paths = ["lib"] s.add_dependency 'redis', '>= 2.1' - s.add_dependency 'redis-namespace', '>= 1.1.0' s.add_dependency 'sinatra', '>= 1.2.6' s.add_dependency 'simple-random', '>= 0.9.3'
0
Remove dependency on Redis::Namespace (#425)
1
.gemspec
gemspec
mit
splitrb/split
10070805
<NME> split.gemspec <BEF> # -*- encoding: utf-8 -*- # frozen_string_literal: true $:.push File.expand_path("../lib", __FILE__) require "split/version" Gem::Specification.new do |s| s.name = "split" s.version = Split::VERSION s.platform = Gem::Platform::RUBY s.authors = ["Andrew Nesbitt"] s.licenses = ["MIT"] s.email = ["[email protected]"] s.homepage = "https://github.com/splitrb/split" s.summary = "Rack based split testing framework" s.metadata = { "homepage_uri" => "https://github.com/splitrb/split", "changelog_uri" => "https://github.com/splitrb/split/blob/main/CHANGELOG.md", "source_code_uri" => "https://github.com/splitrb/split", s.require_paths = ["lib"] s.add_dependency 'redis', '>= 2.1' s.add_dependency 'redis-namespace', '>= 1.1.0' s.add_dependency 'sinatra', '>= 1.2.6' s.add_dependency 'simple-random', '>= 0.9.3' s.required_rubygems_version = ">= 2.0.0" s.files = `git ls-files`.split("\n") s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") s.require_paths = ["lib"] s.add_dependency "redis", ">= 4.2" s.add_dependency "sinatra", ">= 1.2.6" s.add_dependency "rubystats", ">= 0.3.0" s.add_development_dependency "bundler", ">= 1.17" s.add_development_dependency "simplecov", "~> 0.15" s.add_development_dependency "rack-test", "~> 2.0" s.add_development_dependency "rake", "~> 13" s.add_development_dependency "rspec", "~> 3.7" s.add_development_dependency "pry", "~> 0.10" s.add_development_dependency "rails", ">= 5.0" end <MSG> Remove dependency on Redis::Namespace (#425) * Rename redis_url config to redis * Remove dependency on redis-namespace * Add backwards compatible redis_url with deprecation <DFF> @@ -21,7 +21,6 @@ Gem::Specification.new do |s| s.require_paths = ["lib"] s.add_dependency 'redis', '>= 2.1' - s.add_dependency 'redis-namespace', '>= 1.1.0' s.add_dependency 'sinatra', '>= 1.2.6' s.add_dependency 'simple-random', '>= 0.9.3'
0
Remove dependency on Redis::Namespace (#425)
1
.gemspec
gemspec
mit
splitrb/split
10070806
<NME> configuration_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" describe Split::Configuration do before(:each) { @config = Split::Configuration.new } it "should provide a default value for ignore_ip_addresses" do expect(@config.ignore_ip_addresses).to eq([]) end it "should provide default values for db failover" do expect(@config.db_failover).to be_falsey expect(@config.db_failover_on_db_error).to be_a Proc end it "should not allow multiple experiments by default" do expect(@config.allow_multiple_experiments).to be_falsey end it "should be enabled by default" do expect(@config.enabled).to be_truthy end it "disabled is the opposite of enabled" do @config.enabled = false expect(@config.disabled?).to be_truthy end it "should not store the overridden test group per default" do expect(@config.store_override).to be_falsey end it "should provide a default pattern for robots" do %w[Baidu Gigabot Googlebot libwww-perl lwp-trivial msnbot SiteUptime Slurp WordPress ZIBB ZyBorg YandexBot AdsBot-Google Wget curl bitlybot facebookexternalhit spider].each do |robot| expect(@config.robot_regex).to match(robot) end expect(@config.robot_regex).to match("EventMachine HttpClient") expect(@config.robot_regex).to match("libwww-perl/5.836") expect(@config.robot_regex).to match("Pingdom.com_bot_version_1.4_(http://www.pingdom.com)") expect(@config.robot_regex).to match(" - ") end it "should accept real UAs with the robot regexp" do expect(@config.robot_regex).not_to match("Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0") expect(@config.robot_regex).not_to match("Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; F-6.0SP2-20041109; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; .NET CLR 1.1.4322; InfoPath.3)") end it "should allow adding a bot to the bot list" do @config.bots["newbot"] = "An amazing test bot" expect(@config.robot_regex).to match("newbot") end it "should use the session adapter for 
persistence by default" do expect(@config.persistence).to eq(Split::Persistence::SessionAdapter) end it "should load a metric" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end it "should allow loading of experiment using experment_for" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.experiment_for(:my_experiment)).to eq({ alternatives: ["control_opt", ["other_opt"]] }) end context "when experiments are defined via YAML" do context "as strings" do context "in a basic configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - Control Opt - Alt One - Alt Two resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "in a configuration with metadata" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 metadata: Control Opt: text: 'Control Option' Alt One: text: 'Alternative One' Alt Two: text: 'Alternative Two' resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should have metadata on the experiment" do meta = @config.normalized_experiments[:my_experiment][:metadata] expect(meta).to_not be nil expect(meta["Control Opt"]["text"]).to eq("Control Option") end end context "in a complex configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 resettable: false metric: my_metric another_experiment: alternatives: - a - b eos @config.experiments = 
YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [{ "Control Opt"=>0.67 }, [{ "Alt One"=>0.1 }, { "Alt Two"=>0.23 }]] }, another_experiment: { alternatives: ["a", ["b"]] } }) end it "should recognize metrics" do expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end end end context "as symbols" do context "with valid YAML" do before do experiments_yaml = <<-eos :my_experiment: :alternatives: - Control Opt - Alt One - Alt Two :resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "with invalid YAML" do let(:yaml) { YAML.load(input) } context "with an empty string" do let(:input) { "" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end context "with just the YAML header" do let(:input) { "---" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end end end end it "should normalize experiments" do @config.experiments = { my_experiment: { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } } expect(@config.normalized_experiments).to eq({ my_experiment: { alternatives: [{ "control_opt"=>0.67 }, [{ "second_opt"=>0.1 }, { "third_opt"=>0.23 }]] } }) end context "redis configuration" do it "should default to local redis server" do old_redis_url = ENV["REDIS_URL"] ENV.delete("REDIS_URL") context "redis configuration" do it "should default to local redis server" do expect(@config.redis).to eq("redis://localhost:6379") end it "should allow for redis url to be configured" do it "should 
use the ENV variable" do old_redis_url = ENV["REDIS_URL"] ENV["REDIS_URL"] = "env_redis_url" context "provided REDIS_URL environment variable" do it "should use the ENV variable" do ENV['REDIS_URL'] = "env_redis_url" expect(Split::Configuration.new.redis).to eq("env_redis_url") ENV.delete('REDIS_URL') end end end it "should allow the persistence cookie length to be configured" do @config.persistence_cookie_length = 2592000 expect(@config.persistence_cookie_length).to eq(2592000) end end context "persistence cookie domain" do it "should default to nil" do expect(@config.persistence_cookie_domain).to eq(nil) end it "should allow the persistence cookie domain to be configured" do @config.persistence_cookie_domain = ".acme.com" expect(@config.persistence_cookie_domain).to eq(".acme.com") end end end <MSG> Merge pull request #662 from splitrb/gh-actions Moving to Github Actions for CI <DFF> @@ -214,7 +214,10 @@ describe Split::Configuration do context "redis configuration" do it "should default to local redis server" do - expect(@config.redis).to eq("redis://localhost:6379") + old_redis_url = ENV['REDIS_URL'] + ENV.delete('REDIS_URL') + expect(Split::Configuration.new.redis).to eq("redis://localhost:6379") + ENV['REDIS_URL'] = old_redis_url end it "should allow for redis url to be configured" do @@ -224,9 +227,10 @@ describe Split::Configuration do context "provided REDIS_URL environment variable" do it "should use the ENV variable" do + old_redis_url = ENV['REDIS_URL'] ENV['REDIS_URL'] = "env_redis_url" expect(Split::Configuration.new.redis).to eq("env_redis_url") - ENV.delete('REDIS_URL') + ENV['REDIS_URL'] = old_redis_url end end end
6
Merge pull request #662 from splitrb/gh-actions
2
.rb
rb
mit
splitrb/split
10070807
<NME> configuration_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" describe Split::Configuration do before(:each) { @config = Split::Configuration.new } it "should provide a default value for ignore_ip_addresses" do expect(@config.ignore_ip_addresses).to eq([]) end it "should provide default values for db failover" do expect(@config.db_failover).to be_falsey expect(@config.db_failover_on_db_error).to be_a Proc end it "should not allow multiple experiments by default" do expect(@config.allow_multiple_experiments).to be_falsey end it "should be enabled by default" do expect(@config.enabled).to be_truthy end it "disabled is the opposite of enabled" do @config.enabled = false expect(@config.disabled?).to be_truthy end it "should not store the overridden test group per default" do expect(@config.store_override).to be_falsey end it "should provide a default pattern for robots" do %w[Baidu Gigabot Googlebot libwww-perl lwp-trivial msnbot SiteUptime Slurp WordPress ZIBB ZyBorg YandexBot AdsBot-Google Wget curl bitlybot facebookexternalhit spider].each do |robot| expect(@config.robot_regex).to match(robot) end expect(@config.robot_regex).to match("EventMachine HttpClient") expect(@config.robot_regex).to match("libwww-perl/5.836") expect(@config.robot_regex).to match("Pingdom.com_bot_version_1.4_(http://www.pingdom.com)") expect(@config.robot_regex).to match(" - ") end it "should accept real UAs with the robot regexp" do expect(@config.robot_regex).not_to match("Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0") expect(@config.robot_regex).not_to match("Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; F-6.0SP2-20041109; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; .NET CLR 1.1.4322; InfoPath.3)") end it "should allow adding a bot to the bot list" do @config.bots["newbot"] = "An amazing test bot" expect(@config.robot_regex).to match("newbot") end it "should use the session adapter for 
persistence by default" do expect(@config.persistence).to eq(Split::Persistence::SessionAdapter) end it "should load a metric" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end it "should allow loading of experiment using experment_for" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.experiment_for(:my_experiment)).to eq({ alternatives: ["control_opt", ["other_opt"]] }) end context "when experiments are defined via YAML" do context "as strings" do context "in a basic configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - Control Opt - Alt One - Alt Two resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "in a configuration with metadata" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 metadata: Control Opt: text: 'Control Option' Alt One: text: 'Alternative One' Alt Two: text: 'Alternative Two' resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should have metadata on the experiment" do meta = @config.normalized_experiments[:my_experiment][:metadata] expect(meta).to_not be nil expect(meta["Control Opt"]["text"]).to eq("Control Option") end end context "in a complex configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 resettable: false metric: my_metric another_experiment: alternatives: - a - b eos @config.experiments = 
YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [{ "Control Opt"=>0.67 }, [{ "Alt One"=>0.1 }, { "Alt Two"=>0.23 }]] }, another_experiment: { alternatives: ["a", ["b"]] } }) end it "should recognize metrics" do expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end end end context "as symbols" do context "with valid YAML" do before do experiments_yaml = <<-eos :my_experiment: :alternatives: - Control Opt - Alt One - Alt Two :resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "with invalid YAML" do let(:yaml) { YAML.load(input) } context "with an empty string" do let(:input) { "" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end context "with just the YAML header" do let(:input) { "---" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end end end end it "should normalize experiments" do @config.experiments = { my_experiment: { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } } expect(@config.normalized_experiments).to eq({ my_experiment: { alternatives: [{ "control_opt"=>0.67 }, [{ "second_opt"=>0.1 }, { "third_opt"=>0.23 }]] } }) end context "redis configuration" do it "should default to local redis server" do old_redis_url = ENV["REDIS_URL"] ENV.delete("REDIS_URL") context "redis configuration" do it "should default to local redis server" do expect(@config.redis).to eq("redis://localhost:6379") end it "should allow for redis url to be configured" do it "should 
use the ENV variable" do old_redis_url = ENV["REDIS_URL"] ENV["REDIS_URL"] = "env_redis_url" context "provided REDIS_URL environment variable" do it "should use the ENV variable" do ENV['REDIS_URL'] = "env_redis_url" expect(Split::Configuration.new.redis).to eq("env_redis_url") ENV.delete('REDIS_URL') end end end it "should allow the persistence cookie length to be configured" do @config.persistence_cookie_length = 2592000 expect(@config.persistence_cookie_length).to eq(2592000) end end context "persistence cookie domain" do it "should default to nil" do expect(@config.persistence_cookie_domain).to eq(nil) end it "should allow the persistence cookie domain to be configured" do @config.persistence_cookie_domain = ".acme.com" expect(@config.persistence_cookie_domain).to eq(".acme.com") end end end <MSG> Merge pull request #662 from splitrb/gh-actions Moving to Github Actions for CI <DFF> @@ -214,7 +214,10 @@ describe Split::Configuration do context "redis configuration" do it "should default to local redis server" do - expect(@config.redis).to eq("redis://localhost:6379") + old_redis_url = ENV['REDIS_URL'] + ENV.delete('REDIS_URL') + expect(Split::Configuration.new.redis).to eq("redis://localhost:6379") + ENV['REDIS_URL'] = old_redis_url end it "should allow for redis url to be configured" do @@ -224,9 +227,10 @@ describe Split::Configuration do context "provided REDIS_URL environment variable" do it "should use the ENV variable" do + old_redis_url = ENV['REDIS_URL'] ENV['REDIS_URL'] = "env_redis_url" expect(Split::Configuration.new.redis).to eq("env_redis_url") - ENV.delete('REDIS_URL') + ENV['REDIS_URL'] = old_redis_url end end end
6
Merge pull request #662 from splitrb/gh-actions
2
.rb
rb
mit
splitrb/split
10070808
<NME> configuration_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" describe Split::Configuration do before(:each) { @config = Split::Configuration.new } it "should provide a default value for ignore_ip_addresses" do expect(@config.ignore_ip_addresses).to eq([]) end it "should provide default values for db failover" do expect(@config.db_failover).to be_falsey expect(@config.db_failover_on_db_error).to be_a Proc end it "should not allow multiple experiments by default" do expect(@config.allow_multiple_experiments).to be_falsey end it "should be enabled by default" do expect(@config.enabled).to be_truthy end it "disabled is the opposite of enabled" do @config.enabled = false expect(@config.disabled?).to be_truthy end it "should not store the overridden test group per default" do expect(@config.store_override).to be_falsey end it "should provide a default pattern for robots" do %w[Baidu Gigabot Googlebot libwww-perl lwp-trivial msnbot SiteUptime Slurp WordPress ZIBB ZyBorg YandexBot AdsBot-Google Wget curl bitlybot facebookexternalhit spider].each do |robot| expect(@config.robot_regex).to match(robot) end expect(@config.robot_regex).to match("EventMachine HttpClient") expect(@config.robot_regex).to match("libwww-perl/5.836") expect(@config.robot_regex).to match("Pingdom.com_bot_version_1.4_(http://www.pingdom.com)") expect(@config.robot_regex).to match(" - ") end it "should accept real UAs with the robot regexp" do expect(@config.robot_regex).not_to match("Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0") expect(@config.robot_regex).not_to match("Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; F-6.0SP2-20041109; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; .NET CLR 1.1.4322; InfoPath.3)") end it "should allow adding a bot to the bot list" do @config.bots["newbot"] = "An amazing test bot" expect(@config.robot_regex).to match("newbot") end it "should use the session adapter for 
persistence by default" do expect(@config.persistence).to eq(Split::Persistence::SessionAdapter) end it "should load a metric" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end it "should allow loading of experiment using experment_for" do @config.experiments = { my_experiment: { alternatives: ["control_opt", "other_opt"], metric: :my_metric } } expect(@config.experiment_for(:my_experiment)).to eq({ alternatives: ["control_opt", ["other_opt"]] }) end context "when experiments are defined via YAML" do context "as strings" do context "in a basic configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - Control Opt - Alt One - Alt Two resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "in a configuration with metadata" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 metadata: Control Opt: text: 'Control Option' Alt One: text: 'Alternative One' Alt Two: text: 'Alternative Two' resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should have metadata on the experiment" do meta = @config.normalized_experiments[:my_experiment][:metadata] expect(meta).to_not be nil expect(meta["Control Opt"]["text"]).to eq("Control Option") end end context "in a complex configuration" do before do experiments_yaml = <<-eos my_experiment: alternatives: - name: Control Opt percent: 67 - name: Alt One percent: 10 - name: Alt Two percent: 23 resettable: false metric: my_metric another_experiment: alternatives: - a - b eos @config.experiments = 
YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: [{ "Control Opt"=>0.67 }, [{ "Alt One"=>0.1 }, { "Alt Two"=>0.23 }]] }, another_experiment: { alternatives: ["a", ["b"]] } }) end it "should recognize metrics" do expect(@config.metrics).not_to be_nil expect(@config.metrics.keys).to eq([:my_metric]) end end end context "as symbols" do context "with valid YAML" do before do experiments_yaml = <<-eos :my_experiment: :alternatives: - Control Opt - Alt One - Alt Two :resettable: false eos @config.experiments = YAML.load(experiments_yaml) end it "should normalize experiments" do expect(@config.normalized_experiments).to eq({ my_experiment: { resettable: false, alternatives: ["Control Opt", ["Alt One", "Alt Two"]] } }) end end context "with invalid YAML" do let(:yaml) { YAML.load(input) } context "with an empty string" do let(:input) { "" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end context "with just the YAML header" do let(:input) { "---" } it "should raise an error" do expect { @config.experiments = yaml }.to raise_error(Split::InvalidExperimentsFormatError) end end end end end it "should normalize experiments" do @config.experiments = { my_experiment: { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } } expect(@config.normalized_experiments).to eq({ my_experiment: { alternatives: [{ "control_opt"=>0.67 }, [{ "second_opt"=>0.1 }, { "third_opt"=>0.23 }]] } }) end context "redis configuration" do it "should default to local redis server" do old_redis_url = ENV["REDIS_URL"] ENV.delete("REDIS_URL") context "redis configuration" do it "should default to local redis server" do expect(@config.redis).to eq("redis://localhost:6379") end it "should allow for redis url to be configured" do it "should 
use the ENV variable" do old_redis_url = ENV["REDIS_URL"] ENV["REDIS_URL"] = "env_redis_url" context "provided REDIS_URL environment variable" do it "should use the ENV variable" do ENV['REDIS_URL'] = "env_redis_url" expect(Split::Configuration.new.redis).to eq("env_redis_url") ENV.delete('REDIS_URL') end end end it "should allow the persistence cookie length to be configured" do @config.persistence_cookie_length = 2592000 expect(@config.persistence_cookie_length).to eq(2592000) end end context "persistence cookie domain" do it "should default to nil" do expect(@config.persistence_cookie_domain).to eq(nil) end it "should allow the persistence cookie domain to be configured" do @config.persistence_cookie_domain = ".acme.com" expect(@config.persistence_cookie_domain).to eq(".acme.com") end end end <MSG> Merge pull request #662 from splitrb/gh-actions Moving to Github Actions for CI <DFF> @@ -214,7 +214,10 @@ describe Split::Configuration do context "redis configuration" do it "should default to local redis server" do - expect(@config.redis).to eq("redis://localhost:6379") + old_redis_url = ENV['REDIS_URL'] + ENV.delete('REDIS_URL') + expect(Split::Configuration.new.redis).to eq("redis://localhost:6379") + ENV['REDIS_URL'] = old_redis_url end it "should allow for redis url to be configured" do @@ -224,9 +227,10 @@ describe Split::Configuration do context "provided REDIS_URL environment variable" do it "should use the ENV variable" do + old_redis_url = ENV['REDIS_URL'] ENV['REDIS_URL'] = "env_redis_url" expect(Split::Configuration.new.redis).to eq("env_redis_url") - ENV.delete('REDIS_URL') + ENV['REDIS_URL'] = old_redis_url end end end
6
Merge pull request #662 from splitrb/gh-actions
2
.rb
rb
mit
splitrb/split
10070809
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count 
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to 
eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } 
expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2) experiment = Split::ExperimentCatalog.find('link_color') expect(experiment.alternatives.map(&:name)).to eq(['blue', 'red']) # TODO: persist alternative weights # expect(experiment.alternatives.collect{|a| a.weight}).to eq([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to 
eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, 
metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the 
experiment has been already finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user 
is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) 
ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, 
} should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") 
expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", 
"link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = 
Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it 
"should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = 
Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| 
"shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do 
Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = 
Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do 
expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Merge pull request #570 from giraffate/persist_alternative_weights Persist alternative weights <DFF> @@ -183,8 +183,7 @@ describe Split::Helper do ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2) experiment = Split::ExperimentCatalog.find('link_color') expect(experiment.alternatives.map(&:name)).to eq(['blue', 'red']) - # TODO: persist alternative weights - # expect(experiment.alternatives.collect{|a| a.weight}).to eq([0.01, 0.2]) + expect(experiment.alternatives.collect{|a| a.weight}).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do
1
Merge pull request #570 from giraffate/persist_alternative_weights
2
.rb
rb
mit
splitrb/split
10070810
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count 
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to 
eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } 
expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2) experiment = Split::ExperimentCatalog.find('link_color') expect(experiment.alternatives.map(&:name)).to eq(['blue', 'red']) # TODO: persist alternative weights # expect(experiment.alternatives.collect{|a| a.weight}).to eq([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to 
eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, 
metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the 
experiment has been already finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user 
is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) 
ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, 
} should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") 
expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", 
"link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = 
Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it 
"should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = 
Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| 
"shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do 
Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = 
Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do 
expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Merge pull request #570 from giraffate/persist_alternative_weights Persist alternative weights <DFF> @@ -183,8 +183,7 @@ describe Split::Helper do ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2) experiment = Split::ExperimentCatalog.find('link_color') expect(experiment.alternatives.map(&:name)).to eq(['blue', 'red']) - # TODO: persist alternative weights - # expect(experiment.alternatives.collect{|a| a.weight}).to eq([0.01, 0.2]) + expect(experiment.alternatives.collect{|a| a.weight}).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do
1
Merge pull request #570 from giraffate/persist_alternative_weights
2
.rb
rb
mit
splitrb/split
10070811
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count 
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to 
eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } 
expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2) experiment = Split::ExperimentCatalog.find('link_color') expect(experiment.alternatives.map(&:name)).to eq(['blue', 'red']) # TODO: persist alternative weights # expect(experiment.alternatives.collect{|a| a.weight}).to eq([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to 
eq(experiments.size - 1) count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, 
metadata: { "one" => "Meta1", "two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the 
experiment has been already finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user 
is excluded from" do before do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) 
ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, 
} should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") 
expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", 
"link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = 
Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it 
"should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = 
Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| 
"shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do 
Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = 
Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do 
expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Merge pull request #570 from giraffate/persist_alternative_weights Persist alternative weights <DFF> @@ -183,8 +183,7 @@ describe Split::Helper do ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2) experiment = Split::ExperimentCatalog.find('link_color') expect(experiment.alternatives.map(&:name)).to eq(['blue', 'red']) - # TODO: persist alternative weights - # expect(experiment.alternatives.collect{|a| a.weight}).to eq([0.01, 0.2]) + expect(experiment.alternatives.collect{|a| a.weight}).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do
1
Merge pull request #570 from giraffate/persist_alternative_weights
2
.rb
rb
mit
splitrb/split
10070812
<NME> experiment.rb <BEF> # frozen_string_literal: true module Split class Experiment attr_accessor :name attr_accessor :goals attr_accessor :alternative_probabilities attr_accessor :metadata attr_reader :alternatives attr_reader :resettable DEFAULT_OPTIONS = { resettable: true } def self.find(name) Split.cache(:experiments, name) do return unless Split.redis.exists?(name) Experiment.new(name).tap { |exp| exp.load_from_redis } end end def initialize(name, options = {}) options = DEFAULT_OPTIONS.merge(options) @name = name.to_s extract_alternatives_from_options(options) end def self.finished_key(key) "#{key}:finished" end def set_alternatives_and_options(options) options_with_defaults = DEFAULT_OPTIONS.merge( options.reject { |k, v| v.nil? } ) self.alternatives = options_with_defaults[:alternatives] self.goals = options_with_defaults[:goals] self.resettable = options_with_defaults[:resettable] self.algorithm = options_with_defaults[:algorithm] self.metadata = options_with_defaults[:metadata] end def extract_alternatives_from_options(options) alts = options[:alternatives] || [] if alts.length == 1 if alts[0].is_a? Hash alts = alts[0].map { |k, v| { k => v } } end end if alts.empty? exp_config = Split.configuration.experiment_for(name) if exp_config alts = load_alternatives_from_configuration options[:goals] = Split::GoalsCollection.new(@name).load_from_configuration options[:metadata] = load_metadata_from_configuration options[:resettable] = exp_config[:resettable] options[:algorithm] = exp_config[:algorithm] end end options[:alternatives] = alts set_alternatives_and_options(options) # calculate probability that each alternative is the winner @alternative_probabilities = {} alts end def save validate! if new_record? start unless Split.configuration.start_manually persist_experiment_configuration elsif experiment_configuration_has_changed? 
reset unless Split.configuration.reset_manually persist_experiment_configuration @alternatives.reverse.each {|a| Split.redis.lpush(name, a.name)} goals_collection.save save_metadata Split.redis.set(metadata_key, @metadata.to_json) unless @metadata.nil? else existing_alternatives = load_alternatives_from_redis existing_goals = Split::GoalsCollection.new(@name).load_from_redis def validate! if @alternatives.empty? && Split.configuration.experiment_for(@name).nil? raise ExperimentNotFound.new("Experiment #{@name} not found") end @alternatives.each { |a| a.validate! } goals_collection.validate! end def new_record? ExperimentCatalog.find(name).nil? end def ==(obj) self.name == obj.name end def [](name) alternatives.find { |a| a.name == name } end def algorithm @algorithm ||= Split.configuration.algorithm end def algorithm=(algorithm) @algorithm = algorithm.is_a?(String) ? algorithm.constantize : algorithm end def resettable=(resettable) @resettable = resettable.is_a?(String) ? resettable == "true" : resettable end def alternatives=(alts) @alternatives = alts.map do |alternative| if alternative.kind_of?(Split::Alternative) alternative else Split::Alternative.new(alternative, @name) end end end def winner Split.cache(:experiment_winner, name) do experiment_winner = redis.hget(:experiment_winner, name) if experiment_winner Split::Alternative.new(experiment_winner, name) else nil end end end def has_winner? return @has_winner if defined? @has_winner @has_winner = !winner.nil? 
end def winner=(winner_name) redis.hset(:experiment_winner, name, winner_name.to_s) @has_winner = true Split.configuration.on_experiment_winner_choose.call(self) end def participant_count alternatives.inject(0) { |sum, a| sum + a.participant_count } end def control alternatives.first end def reset_winner redis.hdel(:experiment_winner, name) @has_winner = false Split::Cache.clear_key(@name) end def start redis.hset(:experiment_start_times, @name, Time.now.to_i) end def start_time Split.cache(:experiment_start_times, @name) do t = redis.hget(:experiment_start_times, @name) if t # Check if stored time is an integer if t =~ /^[-+]?[0-9]+$/ Time.at(t.to_i) else Time.parse(t) end end end end def next_alternative winner || random_alternative end def random_alternative if alternatives.length > 1 algorithm.choose_alternative(self) else alternatives.first end end def version @version ||= (redis.get("#{name}:version").to_i || 0) end def increment_version @version = redis.incr("#{name}:version") end def key if version.to_i > 0 "#{name}:#{version}" else name end end def goals_key "#{name}:goals" end def finished_key self.class.finished_key(key) end def metadata_key "#{name}:metadata" end def resettable? 
resettable end def reset Split.configuration.on_before_experiment_reset.call(self) Split::Cache.clear_key(@name) alternatives.each(&:reset) reset_winner Split.configuration.on_experiment_reset.call(self) increment_version end def delete Split.configuration.on_before_experiment_delete.call(self) if Split.configuration.start_manually redis.hdel(:experiment_start_times, @name) end reset_winner redis.srem(:experiments, name) remove_experiment_cohorting remove_experiment_configuration Split.configuration.on_experiment_delete.call(self) increment_version end def delete_metadata redis.del(metadata_key) end def load_from_redis exp_config = redis.hgetall(experiment_config_key) options = { resettable: exp_config["resettable"], algorithm: exp_config["algorithm"], alternatives: load_alternatives_from_redis, goals: Split::GoalsCollection.new(@name).load_from_redis, metadata: load_metadata_from_redis } set_alternatives_and_options(options) end def calc_winning_alternatives # Cache the winning alternatives so we recalculate them once per the specified interval. intervals_since_epoch = Time.now.utc.to_i / Split.configuration.winning_alternative_recalculation_interval if self.calc_time != intervals_since_epoch if goals.empty? 
self.estimate_winning_alternative else goals.each do |goal| self.estimate_winning_alternative(goal) end end self.calc_time = intervals_since_epoch self.save end end def estimate_winning_alternative(goal = nil) # initialize a hash of beta distributions based on the alternatives' conversion rates beta_params = calc_beta_params(goal) winning_alternatives = [] Split.configuration.beta_probability_simulations.times do # calculate simulated conversion rates from the beta distributions simulated_cr_hash = calc_simulated_conversion_rates(beta_params) winning_alternative = find_simulated_winner(simulated_cr_hash) # push the winning pair to the winning_alternatives array winning_alternatives.push(winning_alternative) end winning_counts = count_simulated_wins(winning_alternatives) @alternative_probabilities = calc_alternative_probabilities(winning_counts, Split.configuration.beta_probability_simulations) write_to_alternatives(goal) self.save end def write_to_alternatives(goal = nil) alternatives.each do |alternative| alternative.set_p_winner(@alternative_probabilities[alternative], goal) end end def calc_alternative_probabilities(winning_counts, number_of_simulations) alternative_probabilities = {} winning_counts.each do |alternative, wins| alternative_probabilities[alternative] = wins / number_of_simulations.to_f end alternative_probabilities end def count_simulated_wins(winning_alternatives) # initialize a hash to keep track of winning alternative in simulations winning_counts = {} alternatives.each do |alternative| winning_counts[alternative] = 0 end # count number of times each alternative won, calculate probabilities, place in hash winning_alternatives.each do |alternative| winning_counts[alternative] += 1 end winning_counts end def find_simulated_winner(simulated_cr_hash) # figure out which alternative had the highest simulated conversion rate winning_pair = ["", 0.0] simulated_cr_hash.each do |alternative, rate| if rate > winning_pair[1] winning_pair = [alternative, 
rate] end end winner = winning_pair[0] winner end def calc_simulated_conversion_rates(beta_params) simulated_cr_hash = {} # create a hash which has the conversion rate pulled from each alternative's beta distribution beta_params.each do |alternative, params| alpha = params[0] beta = params[1] simulated_conversion_rate = Split::Algorithms.beta_distribution_rng(alpha, beta) simulated_cr_hash[alternative] = simulated_conversion_rate end simulated_cr_hash end def calc_beta_params(goal = nil) beta_params = {} alternatives.each do |alternative| conversions = goal.nil? ? alternative.completed_count : alternative.completed_count(goal) alpha = 1 + conversions beta = 1 + alternative.participant_count - conversions params = [alpha, beta] beta_params[alternative] = params end beta_params end def calc_time=(time) redis.hset(experiment_config_key, :calc_time, time) end def calc_time redis.hget(experiment_config_key, :calc_time).to_i end def jstring(goal = nil) js_id = if goal.nil? name else name + "-" + goal end js_id.gsub("/", "--") end def cohorting_disabled? @cohorting_disabled ||= begin value = redis.hget(experiment_config_key, :cohorting) value.nil? ? false : value.downcase == "true" end end def disable_cohorting @cohorting_disabled = true redis.hset(experiment_config_key, :cohorting, true.to_s) end def enable_cohorting @cohorting_disabled = false redis.hset(experiment_config_key, :cohorting, false.to_s) end protected def experiment_config_key "experiment_configurations/#{@name}" end def load_metadata_from_configuration Split.configuration.experiment_for(@name)[:metadata] end def load_metadata_from_redis meta = redis.get(metadata_key) JSON.parse(meta) unless meta.nil? 
end def load_alternatives_from_configuration alts = Split.configuration.experiment_for(@name)[:alternatives] raise ArgumentError, "Experiment configuration is missing :alternatives array" unless alts if alts.is_a?(Hash) alts.keys else alts.flatten end end def load_alternatives_from_redis alternatives = redis.lrange(@name, 0, -1) alternatives.map do |alt| alt = begin JSON.parse(alt) rescue alt end Split::Alternative.new(alt, @name) end end private def redis Split.redis end def redis_interface RedisInterface.new end def persist_experiment_configuration redis_interface.add_to_set(:experiments, name) redis_interface.persist_list(name, @alternatives.map { |alt| { alt.name => alt.weight }.to_json }) goals_collection.save if @metadata redis.set(metadata_key, @metadata.to_json) else delete_metadata end end def remove_experiment_configuration @alternatives.each(&:delete) goals_collection.delete delete_metadata redis.del(@name) end def experiment_configuration_has_changed? existing_experiment = Experiment.find(@name) existing_experiment.alternatives.map(&:to_s) != @alternatives.map(&:to_s) || existing_experiment.goals != @goals || existing_experiment.metadata != @metadata end def goals_collection Split::GoalsCollection.new(@name, @goals) end def remove_experiment_cohorting @cohorting_disabled = false redis.hdel(experiment_config_key, :cohorting) end end end <MSG> Remove unecessary code from Experiment#save Experiment#save_metadata is called in the previous line, which resulted in calling `Split.redis.set(metadata_key, @metadata.to_json)` twice. I removed the unecessary line. <DFF> @@ -86,7 +86,6 @@ module Split @alternatives.reverse.each {|a| Split.redis.lpush(name, a.name)} goals_collection.save save_metadata - Split.redis.set(metadata_key, @metadata.to_json) unless @metadata.nil? else existing_alternatives = load_alternatives_from_redis existing_goals = Split::GoalsCollection.new(@name).load_from_redis
0
Remove unecessary code from Experiment#save
1
.rb
rb
mit
splitrb/split
10070813
<NME> experiment.rb <BEF> # frozen_string_literal: true module Split class Experiment attr_accessor :name attr_accessor :goals attr_accessor :alternative_probabilities attr_accessor :metadata attr_reader :alternatives attr_reader :resettable DEFAULT_OPTIONS = { resettable: true } def self.find(name) Split.cache(:experiments, name) do return unless Split.redis.exists?(name) Experiment.new(name).tap { |exp| exp.load_from_redis } end end def initialize(name, options = {}) options = DEFAULT_OPTIONS.merge(options) @name = name.to_s extract_alternatives_from_options(options) end def self.finished_key(key) "#{key}:finished" end def set_alternatives_and_options(options) options_with_defaults = DEFAULT_OPTIONS.merge( options.reject { |k, v| v.nil? } ) self.alternatives = options_with_defaults[:alternatives] self.goals = options_with_defaults[:goals] self.resettable = options_with_defaults[:resettable] self.algorithm = options_with_defaults[:algorithm] self.metadata = options_with_defaults[:metadata] end def extract_alternatives_from_options(options) alts = options[:alternatives] || [] if alts.length == 1 if alts[0].is_a? Hash alts = alts[0].map { |k, v| { k => v } } end end if alts.empty? exp_config = Split.configuration.experiment_for(name) if exp_config alts = load_alternatives_from_configuration options[:goals] = Split::GoalsCollection.new(@name).load_from_configuration options[:metadata] = load_metadata_from_configuration options[:resettable] = exp_config[:resettable] options[:algorithm] = exp_config[:algorithm] end end options[:alternatives] = alts set_alternatives_and_options(options) # calculate probability that each alternative is the winner @alternative_probabilities = {} alts end def save validate! if new_record? start unless Split.configuration.start_manually persist_experiment_configuration elsif experiment_configuration_has_changed? 
reset unless Split.configuration.reset_manually persist_experiment_configuration @alternatives.reverse.each {|a| Split.redis.lpush(name, a.name)} goals_collection.save save_metadata Split.redis.set(metadata_key, @metadata.to_json) unless @metadata.nil? else existing_alternatives = load_alternatives_from_redis existing_goals = Split::GoalsCollection.new(@name).load_from_redis def validate! if @alternatives.empty? && Split.configuration.experiment_for(@name).nil? raise ExperimentNotFound.new("Experiment #{@name} not found") end @alternatives.each { |a| a.validate! } goals_collection.validate! end def new_record? ExperimentCatalog.find(name).nil? end def ==(obj) self.name == obj.name end def [](name) alternatives.find { |a| a.name == name } end def algorithm @algorithm ||= Split.configuration.algorithm end def algorithm=(algorithm) @algorithm = algorithm.is_a?(String) ? algorithm.constantize : algorithm end def resettable=(resettable) @resettable = resettable.is_a?(String) ? resettable == "true" : resettable end def alternatives=(alts) @alternatives = alts.map do |alternative| if alternative.kind_of?(Split::Alternative) alternative else Split::Alternative.new(alternative, @name) end end end def winner Split.cache(:experiment_winner, name) do experiment_winner = redis.hget(:experiment_winner, name) if experiment_winner Split::Alternative.new(experiment_winner, name) else nil end end end def has_winner? return @has_winner if defined? @has_winner @has_winner = !winner.nil? 
end def winner=(winner_name) redis.hset(:experiment_winner, name, winner_name.to_s) @has_winner = true Split.configuration.on_experiment_winner_choose.call(self) end def participant_count alternatives.inject(0) { |sum, a| sum + a.participant_count } end def control alternatives.first end def reset_winner redis.hdel(:experiment_winner, name) @has_winner = false Split::Cache.clear_key(@name) end def start redis.hset(:experiment_start_times, @name, Time.now.to_i) end def start_time Split.cache(:experiment_start_times, @name) do t = redis.hget(:experiment_start_times, @name) if t # Check if stored time is an integer if t =~ /^[-+]?[0-9]+$/ Time.at(t.to_i) else Time.parse(t) end end end end def next_alternative winner || random_alternative end def random_alternative if alternatives.length > 1 algorithm.choose_alternative(self) else alternatives.first end end def version @version ||= (redis.get("#{name}:version").to_i || 0) end def increment_version @version = redis.incr("#{name}:version") end def key if version.to_i > 0 "#{name}:#{version}" else name end end def goals_key "#{name}:goals" end def finished_key self.class.finished_key(key) end def metadata_key "#{name}:metadata" end def resettable? 
resettable end def reset Split.configuration.on_before_experiment_reset.call(self) Split::Cache.clear_key(@name) alternatives.each(&:reset) reset_winner Split.configuration.on_experiment_reset.call(self) increment_version end def delete Split.configuration.on_before_experiment_delete.call(self) if Split.configuration.start_manually redis.hdel(:experiment_start_times, @name) end reset_winner redis.srem(:experiments, name) remove_experiment_cohorting remove_experiment_configuration Split.configuration.on_experiment_delete.call(self) increment_version end def delete_metadata redis.del(metadata_key) end def load_from_redis exp_config = redis.hgetall(experiment_config_key) options = { resettable: exp_config["resettable"], algorithm: exp_config["algorithm"], alternatives: load_alternatives_from_redis, goals: Split::GoalsCollection.new(@name).load_from_redis, metadata: load_metadata_from_redis } set_alternatives_and_options(options) end def calc_winning_alternatives # Cache the winning alternatives so we recalculate them once per the specified interval. intervals_since_epoch = Time.now.utc.to_i / Split.configuration.winning_alternative_recalculation_interval if self.calc_time != intervals_since_epoch if goals.empty? 
self.estimate_winning_alternative else goals.each do |goal| self.estimate_winning_alternative(goal) end end self.calc_time = intervals_since_epoch self.save end end def estimate_winning_alternative(goal = nil) # initialize a hash of beta distributions based on the alternatives' conversion rates beta_params = calc_beta_params(goal) winning_alternatives = [] Split.configuration.beta_probability_simulations.times do # calculate simulated conversion rates from the beta distributions simulated_cr_hash = calc_simulated_conversion_rates(beta_params) winning_alternative = find_simulated_winner(simulated_cr_hash) # push the winning pair to the winning_alternatives array winning_alternatives.push(winning_alternative) end winning_counts = count_simulated_wins(winning_alternatives) @alternative_probabilities = calc_alternative_probabilities(winning_counts, Split.configuration.beta_probability_simulations) write_to_alternatives(goal) self.save end def write_to_alternatives(goal = nil) alternatives.each do |alternative| alternative.set_p_winner(@alternative_probabilities[alternative], goal) end end def calc_alternative_probabilities(winning_counts, number_of_simulations) alternative_probabilities = {} winning_counts.each do |alternative, wins| alternative_probabilities[alternative] = wins / number_of_simulations.to_f end alternative_probabilities end def count_simulated_wins(winning_alternatives) # initialize a hash to keep track of winning alternative in simulations winning_counts = {} alternatives.each do |alternative| winning_counts[alternative] = 0 end # count number of times each alternative won, calculate probabilities, place in hash winning_alternatives.each do |alternative| winning_counts[alternative] += 1 end winning_counts end def find_simulated_winner(simulated_cr_hash) # figure out which alternative had the highest simulated conversion rate winning_pair = ["", 0.0] simulated_cr_hash.each do |alternative, rate| if rate > winning_pair[1] winning_pair = [alternative, 
rate] end end winner = winning_pair[0] winner end def calc_simulated_conversion_rates(beta_params) simulated_cr_hash = {} # create a hash which has the conversion rate pulled from each alternative's beta distribution beta_params.each do |alternative, params| alpha = params[0] beta = params[1] simulated_conversion_rate = Split::Algorithms.beta_distribution_rng(alpha, beta) simulated_cr_hash[alternative] = simulated_conversion_rate end simulated_cr_hash end def calc_beta_params(goal = nil) beta_params = {} alternatives.each do |alternative| conversions = goal.nil? ? alternative.completed_count : alternative.completed_count(goal) alpha = 1 + conversions beta = 1 + alternative.participant_count - conversions params = [alpha, beta] beta_params[alternative] = params end beta_params end def calc_time=(time) redis.hset(experiment_config_key, :calc_time, time) end def calc_time redis.hget(experiment_config_key, :calc_time).to_i end def jstring(goal = nil) js_id = if goal.nil? name else name + "-" + goal end js_id.gsub("/", "--") end def cohorting_disabled? @cohorting_disabled ||= begin value = redis.hget(experiment_config_key, :cohorting) value.nil? ? false : value.downcase == "true" end end def disable_cohorting @cohorting_disabled = true redis.hset(experiment_config_key, :cohorting, true.to_s) end def enable_cohorting @cohorting_disabled = false redis.hset(experiment_config_key, :cohorting, false.to_s) end protected def experiment_config_key "experiment_configurations/#{@name}" end def load_metadata_from_configuration Split.configuration.experiment_for(@name)[:metadata] end def load_metadata_from_redis meta = redis.get(metadata_key) JSON.parse(meta) unless meta.nil? 
end def load_alternatives_from_configuration alts = Split.configuration.experiment_for(@name)[:alternatives] raise ArgumentError, "Experiment configuration is missing :alternatives array" unless alts if alts.is_a?(Hash) alts.keys else alts.flatten end end def load_alternatives_from_redis alternatives = redis.lrange(@name, 0, -1) alternatives.map do |alt| alt = begin JSON.parse(alt) rescue alt end Split::Alternative.new(alt, @name) end end private def redis Split.redis end def redis_interface RedisInterface.new end def persist_experiment_configuration redis_interface.add_to_set(:experiments, name) redis_interface.persist_list(name, @alternatives.map { |alt| { alt.name => alt.weight }.to_json }) goals_collection.save if @metadata redis.set(metadata_key, @metadata.to_json) else delete_metadata end end def remove_experiment_configuration @alternatives.each(&:delete) goals_collection.delete delete_metadata redis.del(@name) end def experiment_configuration_has_changed? existing_experiment = Experiment.find(@name) existing_experiment.alternatives.map(&:to_s) != @alternatives.map(&:to_s) || existing_experiment.goals != @goals || existing_experiment.metadata != @metadata end def goals_collection Split::GoalsCollection.new(@name, @goals) end def remove_experiment_cohorting @cohorting_disabled = false redis.hdel(experiment_config_key, :cohorting) end end end <MSG> Remove unecessary code from Experiment#save Experiment#save_metadata is called in the previous line, which resulted in calling `Split.redis.set(metadata_key, @metadata.to_json)` twice. I removed the unecessary line. <DFF> @@ -86,7 +86,6 @@ module Split @alternatives.reverse.each {|a| Split.redis.lpush(name, a.name)} goals_collection.save save_metadata - Split.redis.set(metadata_key, @metadata.to_json) unless @metadata.nil? else existing_alternatives = load_alternatives_from_redis existing_goals = Split::GoalsCollection.new(@name).load_from_redis
0
Remove unecessary code from Experiment#save
1
.rb
rb
mit
splitrb/split
10070814
<NME> experiment.rb <BEF> # frozen_string_literal: true module Split class Experiment attr_accessor :name attr_accessor :goals attr_accessor :alternative_probabilities attr_accessor :metadata attr_reader :alternatives attr_reader :resettable DEFAULT_OPTIONS = { resettable: true } def self.find(name) Split.cache(:experiments, name) do return unless Split.redis.exists?(name) Experiment.new(name).tap { |exp| exp.load_from_redis } end end def initialize(name, options = {}) options = DEFAULT_OPTIONS.merge(options) @name = name.to_s extract_alternatives_from_options(options) end def self.finished_key(key) "#{key}:finished" end def set_alternatives_and_options(options) options_with_defaults = DEFAULT_OPTIONS.merge( options.reject { |k, v| v.nil? } ) self.alternatives = options_with_defaults[:alternatives] self.goals = options_with_defaults[:goals] self.resettable = options_with_defaults[:resettable] self.algorithm = options_with_defaults[:algorithm] self.metadata = options_with_defaults[:metadata] end def extract_alternatives_from_options(options) alts = options[:alternatives] || [] if alts.length == 1 if alts[0].is_a? Hash alts = alts[0].map { |k, v| { k => v } } end end if alts.empty? exp_config = Split.configuration.experiment_for(name) if exp_config alts = load_alternatives_from_configuration options[:goals] = Split::GoalsCollection.new(@name).load_from_configuration options[:metadata] = load_metadata_from_configuration options[:resettable] = exp_config[:resettable] options[:algorithm] = exp_config[:algorithm] end end options[:alternatives] = alts set_alternatives_and_options(options) # calculate probability that each alternative is the winner @alternative_probabilities = {} alts end def save validate! if new_record? start unless Split.configuration.start_manually persist_experiment_configuration elsif experiment_configuration_has_changed? 
reset unless Split.configuration.reset_manually persist_experiment_configuration @alternatives.reverse.each {|a| Split.redis.lpush(name, a.name)} goals_collection.save save_metadata Split.redis.set(metadata_key, @metadata.to_json) unless @metadata.nil? else existing_alternatives = load_alternatives_from_redis existing_goals = Split::GoalsCollection.new(@name).load_from_redis def validate! if @alternatives.empty? && Split.configuration.experiment_for(@name).nil? raise ExperimentNotFound.new("Experiment #{@name} not found") end @alternatives.each { |a| a.validate! } goals_collection.validate! end def new_record? ExperimentCatalog.find(name).nil? end def ==(obj) self.name == obj.name end def [](name) alternatives.find { |a| a.name == name } end def algorithm @algorithm ||= Split.configuration.algorithm end def algorithm=(algorithm) @algorithm = algorithm.is_a?(String) ? algorithm.constantize : algorithm end def resettable=(resettable) @resettable = resettable.is_a?(String) ? resettable == "true" : resettable end def alternatives=(alts) @alternatives = alts.map do |alternative| if alternative.kind_of?(Split::Alternative) alternative else Split::Alternative.new(alternative, @name) end end end def winner Split.cache(:experiment_winner, name) do experiment_winner = redis.hget(:experiment_winner, name) if experiment_winner Split::Alternative.new(experiment_winner, name) else nil end end end def has_winner? return @has_winner if defined? @has_winner @has_winner = !winner.nil? 
end def winner=(winner_name) redis.hset(:experiment_winner, name, winner_name.to_s) @has_winner = true Split.configuration.on_experiment_winner_choose.call(self) end def participant_count alternatives.inject(0) { |sum, a| sum + a.participant_count } end def control alternatives.first end def reset_winner redis.hdel(:experiment_winner, name) @has_winner = false Split::Cache.clear_key(@name) end def start redis.hset(:experiment_start_times, @name, Time.now.to_i) end def start_time Split.cache(:experiment_start_times, @name) do t = redis.hget(:experiment_start_times, @name) if t # Check if stored time is an integer if t =~ /^[-+]?[0-9]+$/ Time.at(t.to_i) else Time.parse(t) end end end end def next_alternative winner || random_alternative end def random_alternative if alternatives.length > 1 algorithm.choose_alternative(self) else alternatives.first end end def version @version ||= (redis.get("#{name}:version").to_i || 0) end def increment_version @version = redis.incr("#{name}:version") end def key if version.to_i > 0 "#{name}:#{version}" else name end end def goals_key "#{name}:goals" end def finished_key self.class.finished_key(key) end def metadata_key "#{name}:metadata" end def resettable? 
resettable end def reset Split.configuration.on_before_experiment_reset.call(self) Split::Cache.clear_key(@name) alternatives.each(&:reset) reset_winner Split.configuration.on_experiment_reset.call(self) increment_version end def delete Split.configuration.on_before_experiment_delete.call(self) if Split.configuration.start_manually redis.hdel(:experiment_start_times, @name) end reset_winner redis.srem(:experiments, name) remove_experiment_cohorting remove_experiment_configuration Split.configuration.on_experiment_delete.call(self) increment_version end def delete_metadata redis.del(metadata_key) end def load_from_redis exp_config = redis.hgetall(experiment_config_key) options = { resettable: exp_config["resettable"], algorithm: exp_config["algorithm"], alternatives: load_alternatives_from_redis, goals: Split::GoalsCollection.new(@name).load_from_redis, metadata: load_metadata_from_redis } set_alternatives_and_options(options) end def calc_winning_alternatives # Cache the winning alternatives so we recalculate them once per the specified interval. intervals_since_epoch = Time.now.utc.to_i / Split.configuration.winning_alternative_recalculation_interval if self.calc_time != intervals_since_epoch if goals.empty? 
self.estimate_winning_alternative else goals.each do |goal| self.estimate_winning_alternative(goal) end end self.calc_time = intervals_since_epoch self.save end end def estimate_winning_alternative(goal = nil) # initialize a hash of beta distributions based on the alternatives' conversion rates beta_params = calc_beta_params(goal) winning_alternatives = [] Split.configuration.beta_probability_simulations.times do # calculate simulated conversion rates from the beta distributions simulated_cr_hash = calc_simulated_conversion_rates(beta_params) winning_alternative = find_simulated_winner(simulated_cr_hash) # push the winning pair to the winning_alternatives array winning_alternatives.push(winning_alternative) end winning_counts = count_simulated_wins(winning_alternatives) @alternative_probabilities = calc_alternative_probabilities(winning_counts, Split.configuration.beta_probability_simulations) write_to_alternatives(goal) self.save end def write_to_alternatives(goal = nil) alternatives.each do |alternative| alternative.set_p_winner(@alternative_probabilities[alternative], goal) end end def calc_alternative_probabilities(winning_counts, number_of_simulations) alternative_probabilities = {} winning_counts.each do |alternative, wins| alternative_probabilities[alternative] = wins / number_of_simulations.to_f end alternative_probabilities end def count_simulated_wins(winning_alternatives) # initialize a hash to keep track of winning alternative in simulations winning_counts = {} alternatives.each do |alternative| winning_counts[alternative] = 0 end # count number of times each alternative won, calculate probabilities, place in hash winning_alternatives.each do |alternative| winning_counts[alternative] += 1 end winning_counts end def find_simulated_winner(simulated_cr_hash) # figure out which alternative had the highest simulated conversion rate winning_pair = ["", 0.0] simulated_cr_hash.each do |alternative, rate| if rate > winning_pair[1] winning_pair = [alternative, 
rate] end end winner = winning_pair[0] winner end def calc_simulated_conversion_rates(beta_params) simulated_cr_hash = {} # create a hash which has the conversion rate pulled from each alternative's beta distribution beta_params.each do |alternative, params| alpha = params[0] beta = params[1] simulated_conversion_rate = Split::Algorithms.beta_distribution_rng(alpha, beta) simulated_cr_hash[alternative] = simulated_conversion_rate end simulated_cr_hash end def calc_beta_params(goal = nil) beta_params = {} alternatives.each do |alternative| conversions = goal.nil? ? alternative.completed_count : alternative.completed_count(goal) alpha = 1 + conversions beta = 1 + alternative.participant_count - conversions params = [alpha, beta] beta_params[alternative] = params end beta_params end def calc_time=(time) redis.hset(experiment_config_key, :calc_time, time) end def calc_time redis.hget(experiment_config_key, :calc_time).to_i end def jstring(goal = nil) js_id = if goal.nil? name else name + "-" + goal end js_id.gsub("/", "--") end def cohorting_disabled? @cohorting_disabled ||= begin value = redis.hget(experiment_config_key, :cohorting) value.nil? ? false : value.downcase == "true" end end def disable_cohorting @cohorting_disabled = true redis.hset(experiment_config_key, :cohorting, true.to_s) end def enable_cohorting @cohorting_disabled = false redis.hset(experiment_config_key, :cohorting, false.to_s) end protected def experiment_config_key "experiment_configurations/#{@name}" end def load_metadata_from_configuration Split.configuration.experiment_for(@name)[:metadata] end def load_metadata_from_redis meta = redis.get(metadata_key) JSON.parse(meta) unless meta.nil? 
end def load_alternatives_from_configuration alts = Split.configuration.experiment_for(@name)[:alternatives] raise ArgumentError, "Experiment configuration is missing :alternatives array" unless alts if alts.is_a?(Hash) alts.keys else alts.flatten end end def load_alternatives_from_redis alternatives = redis.lrange(@name, 0, -1) alternatives.map do |alt| alt = begin JSON.parse(alt) rescue alt end Split::Alternative.new(alt, @name) end end private def redis Split.redis end def redis_interface RedisInterface.new end def persist_experiment_configuration redis_interface.add_to_set(:experiments, name) redis_interface.persist_list(name, @alternatives.map { |alt| { alt.name => alt.weight }.to_json }) goals_collection.save if @metadata redis.set(metadata_key, @metadata.to_json) else delete_metadata end end def remove_experiment_configuration @alternatives.each(&:delete) goals_collection.delete delete_metadata redis.del(@name) end def experiment_configuration_has_changed? existing_experiment = Experiment.find(@name) existing_experiment.alternatives.map(&:to_s) != @alternatives.map(&:to_s) || existing_experiment.goals != @goals || existing_experiment.metadata != @metadata end def goals_collection Split::GoalsCollection.new(@name, @goals) end def remove_experiment_cohorting @cohorting_disabled = false redis.hdel(experiment_config_key, :cohorting) end end end <MSG> Remove unecessary code from Experiment#save Experiment#save_metadata is called in the previous line, which resulted in calling `Split.redis.set(metadata_key, @metadata.to_json)` twice. I removed the unecessary line. <DFF> @@ -86,7 +86,6 @@ module Split @alternatives.reverse.each {|a| Split.redis.lpush(name, a.name)} goals_collection.save save_metadata - Split.redis.set(metadata_key, @metadata.to_json) unless @metadata.nil? else existing_alternatives = load_alternatives_from_redis existing_goals = Split::GoalsCollection.new(@name).load_from_redis
0
Remove unecessary code from Experiment#save
1
.rb
rb
mit
splitrb/split
10070815
<NME> expand.ts <BEF> import { strictEqual as equal } from 'assert'; import expand, { resolveConfig } from '../src'; describe('Expand Abbreviation', () => { describe('Markup', () => { it('basic', () => { equal(expand('ul>.item$*2'), '<ul>\n\t<li class="item1"></li>\n\t<li class="item2"></li>\n</ul>'); // insert text into abbreviation equal(expand('ul>.item$*', { text: ['foo', 'bar'] }), '<ul>\n\t<li class="item1">foo</li>\n\t<li class="item2">bar</li>\n</ul>'); // insert TextMate-style fields/tabstops in output equal(expand('ul>.item$*2', { options: { 'output.field': (index, placeholder) => `\${${index}${placeholder ? ':' + placeholder : ''}}` } }), '<ul>\n\t<li class="item1">${1}</li>\n\t<li class="item2">${2}</li>\n</ul>'); }); it('attributes', () => { const snippets = { test: 'test[!foo bar. baz={}]' }; const opt = { snippets }; const reverse = { options: { 'output.reverseAttributes': true }, snippets }; equal(expand('a.test'), '<a href="" class="test"></a>'); equal(expand('a.test', reverse), '<a class="test" href=""></a>'); equal(expand('test', opt), '<test bar="bar" baz={}></test>'); equal(expand('test[foo]', opt), '<test bar="bar" baz={}></test>'); equal(expand('test[baz=a foo=1]', opt), '<test foo="1" bar="bar" baz={a}></test>'); equal(expand('map'), '<map name=""></map>'); equal(expand('map[]'), '<map name=""></map>'); equal(expand('map[name="valid"]'), '<map name="valid"></map>'); equal(expand('map[href="invalid"]'), '<map name="" href="invalid"></map>'); // Apply attributes in reverse order equal(expand('test', reverse), '<test bar="bar" baz={}></test>'); equal(expand('test[foo]', reverse), '<test bar="bar" baz={}></test>'); equal(expand('test[baz=a foo=1]', reverse), '<test baz={a} foo="1" bar="bar"></test>'); }); it('numbering', () => { equal(expand('ul>li.item$@-*5'), '<ul>\n\t<li class="item5"></li>\n\t<li class="item4"></li>\n\t<li class="item3"></li>\n\t<li class="item2"></li>\n\t<li class="item1"></li>\n</ul>'); }); it('syntax', () => { 
equal(expand('ul>.item$*2', { syntax: 'html' }), '<ul>\n\t<li class="item1"></li>\n\t<li class="item2"></li>\n</ul>'); equal(expand('ul>.item$*2', { syntax: 'slim' }), 'ul\n\tli.item1 \n\tli.item2 '); equal(expand('xsl:variable[name=a select=b]>div', { syntax: 'xsl' }), '<xsl:variable name="a">\n\t<div></div>\n</xsl:variable>'); }); it('custom profile', () => { equal(expand('img'), '<img src="" alt="">'); equal(expand('img', { options: { 'output.selfClosingStyle': 'xhtml' } }), '<img src="" alt="" />'); }); it('custom variables', () => { const variables = { charset: 'ru-RU' }; equal(expand('[charset=${charset}]{${charset}}'), '<div charset="UTF-8">UTF-8</div>'); equal(expand('[charset=${charset}]{${charset}}', { variables }), '<div charset="ru-RU">ru-RU</div>'); }); it('custom snippets', () => { const snippets = { link: 'link[foo=bar href]/', foo: '.foo[bar=baz]', repeat: 'div>ul>li{Hello World}*3' }; equal(expand('foo', { snippets }), '<div class="foo" bar="baz"></div>'); // `link:css` depends on `link` snippet so changing it will result in // altered `link:css` result equal(expand('link:css'), '<link rel="stylesheet" href="style.css">'); equal(expand('link:css', { snippets }), '<link foo="bar" href="style.css">'); // https://github.com/emmetio/emmet/issues/468 equal(expand('repeat', { snippets }), '<div>\n\t<ul>\n\t\t<li>Hello World</li>\n\t\t<li>Hello World</li>\n\t\t<li>Hello World</li>\n\t</ul>\n</div>'); }); it('formatter options', () => { equal(expand('ul>.item$*2'), '<ul>\n\t<li class="item1"></li>\n\t<li class="item2"></li>\n</ul>'); equal(expand('ul>.item$*2', { options: { 'comment.enabled': true } }), '<ul>\n\t<li class="item1"></li>\n\t<!-- /.item1 -->\n\t<li class="item2"></li>\n\t<!-- /.item2 -->\n</ul>'); equal(expand('div>p'), '<div>\n\t<p></p>\n</div>'); equal(expand('div>p', { options: { 'output.formatLeafNode': true } }), '<div>\n\t<p>\n\t\t\n\t</p>\n</div>'); }); it('JSX', () => { const config = { syntax: 'jsx' }; equal(expand('div#foo.bar', 
config), '<div id="foo" className="bar"></div>'); equal(expand('label[for=a]', config), '<label htmlFor="a"></label>'); equal(expand('Foo.Bar', config), '<Foo.Bar></Foo.Bar>'); equal(expand('div.{theme.style}', config), '<div className={theme.style}></div>'); }); it('override attributes', () => { const config = { syntax: 'jsx' }; equal(expand('.bar', config), '<div className="bar"></div>'); equal(expand('..bar', config), '<div styleName={styles.bar}></div>'); equal(expand('..foo-bar', config), '<div styleName={styles[\'foo-bar\']}></div>'); equal(expand('.foo', { syntax: 'vue' }), '<div class="foo"></div>'); equal(expand('..foo', { syntax: 'vue' }), '<div :class="foo"></div>'); }); it('wrap with abbreviation', () => { equal(expand('div>ul', { text: ['<div>line1</div>\n<div>line2</div>'] }), '<div>\n\t<ul>\n\t\t<div>line1</div>\n\t\t<div>line2</div>\n\t</ul>\n</div>'); equal(expand('p', { text: 'foo\nbar' }), '<p>\n\tfoo\n\tbar\n</p>'); equal(expand('p', { text: '<div>foo</div>' }), '<p>\n\t<div>foo</div>\n</p>'); equal(expand('p', { text: '<span>foo</span>' }), '<p><span>foo</span></p>'); equal(expand('p', { text: 'foo<span>foo</span>' }), '<p>foo<span>foo</span></p>'); equal(expand('p', { text: 'foo<div>foo</div>' }), '<p>foo<div>foo</div></p>'); }); it('wrap with abbreviation href', () => { equal(expand('a', { text: ['www.google.it'] }), '<a href="http://www.google.it">www.google.it</a>'); equal(expand('a', { text: ['then www.google.it'] }), '<a href="">then www.google.it</a>'); equal(expand('a', { text: ['www.google.it'], options: { 'markup.href': false } }), '<a href="">www.google.it</a>'); equal(expand('map[name="https://example.com"]', { text: ['some text'] }), '<map name="https://example.com">some text</map>'); equal(expand('map[href="https://example.com"]', { text: ['some text'] }), '<map name="" href="https://example.com">some text</map>'); equal(expand('map[name="https://example.com"]>b', { text: ['some text'] }), '<map name="https://example.com"><b>some 
text</b></map>'); equal(expand('a[href="https://example.com"]>b', { text: ['<u>some text false</u>'], options: { 'markup.href': false } }), '<a href="https://example.com"><b><u>some text false</u></b></a>'); equal(expand('a[href="https://example.com"]>b', { text: ['<u>some text true</u>'], options: { 'markup.href': true } }), '<a href="https://example.com"><b><u>some text true</u></b></a>'); equal(expand('a[href="https://example.com"]>div', { text: ['<p>some text false</p>'], options: { 'markup.href': false } }), '<a href="https://example.com">\n\t<div>\n\t\t<p>some text false</p>\n\t</div>\n</a>'); equal(expand('a[href="https://example.com"]>div', { text: ['<p>some text true</p>'], options: { 'markup.href': true } }), '<a href="https://example.com">\n\t<div>\n\t\t<p>some text true</p>\n\t</div>\n</a>'); }); // it.only('debug', () => { // equal(expand('link:css'), '<link rel="stylesheet" href="style.css">'); // }); }); describe('Pug templates', () => { const config = resolveConfig({ syntax: 'pug' }); it('basic', () => { equal(expand('!', config), 'doctype html\nhtml(lang="en")\n\thead\n\t\tmeta(charset="UTF-8")\n\t\tmeta(http-equiv="X-UA-Compatible", content="IE=edge")\n\t\tmeta(name="viewport", content="width=device-width, initial-scale=1.0")\n\t\ttitle Document\n\tbody '); }); }); }); <MSG> Removed caching in markup abbreviation parser Naive implementation causes side-effects like incorrect repeater and inserted child content <DFF> @@ -4,6 +4,7 @@ import expand, { resolveConfig } from '../src'; describe('Expand Abbreviation', () => { describe('Markup', () => { it('basic', () => { + equal(expand('input[value="text$"]*2'), '<input type="text" value="text1"><input type="text" value="text2">'); equal(expand('ul>.item$*2'), '<ul>\n\t<li class="item1"></li>\n\t<li class="item2"></li>\n</ul>'); // insert text into abbreviation
1
Removed caching in markup abbreviation parser
0
.ts
ts
mit
emmetio/emmet
10070816
<NME> expand.ts <BEF> import { strictEqual as equal } from 'assert'; import expand, { resolveConfig } from '../src'; describe('Expand Abbreviation', () => { describe('Markup', () => { it('basic', () => { equal(expand('ul>.item$*2'), '<ul>\n\t<li class="item1"></li>\n\t<li class="item2"></li>\n</ul>'); // insert text into abbreviation equal(expand('ul>.item$*', { text: ['foo', 'bar'] }), '<ul>\n\t<li class="item1">foo</li>\n\t<li class="item2">bar</li>\n</ul>'); // insert TextMate-style fields/tabstops in output equal(expand('ul>.item$*2', { options: { 'output.field': (index, placeholder) => `\${${index}${placeholder ? ':' + placeholder : ''}}` } }), '<ul>\n\t<li class="item1">${1}</li>\n\t<li class="item2">${2}</li>\n</ul>'); }); it('attributes', () => { const snippets = { test: 'test[!foo bar. baz={}]' }; const opt = { snippets }; const reverse = { options: { 'output.reverseAttributes': true }, snippets }; equal(expand('a.test'), '<a href="" class="test"></a>'); equal(expand('a.test', reverse), '<a class="test" href=""></a>'); equal(expand('test', opt), '<test bar="bar" baz={}></test>'); equal(expand('test[foo]', opt), '<test bar="bar" baz={}></test>'); equal(expand('test[baz=a foo=1]', opt), '<test foo="1" bar="bar" baz={a}></test>'); equal(expand('map'), '<map name=""></map>'); equal(expand('map[]'), '<map name=""></map>'); equal(expand('map[name="valid"]'), '<map name="valid"></map>'); equal(expand('map[href="invalid"]'), '<map name="" href="invalid"></map>'); // Apply attributes in reverse order equal(expand('test', reverse), '<test bar="bar" baz={}></test>'); equal(expand('test[foo]', reverse), '<test bar="bar" baz={}></test>'); equal(expand('test[baz=a foo=1]', reverse), '<test baz={a} foo="1" bar="bar"></test>'); }); it('numbering', () => { equal(expand('ul>li.item$@-*5'), '<ul>\n\t<li class="item5"></li>\n\t<li class="item4"></li>\n\t<li class="item3"></li>\n\t<li class="item2"></li>\n\t<li class="item1"></li>\n</ul>'); }); it('syntax', () => { 
equal(expand('ul>.item$*2', { syntax: 'html' }), '<ul>\n\t<li class="item1"></li>\n\t<li class="item2"></li>\n</ul>'); equal(expand('ul>.item$*2', { syntax: 'slim' }), 'ul\n\tli.item1 \n\tli.item2 '); equal(expand('xsl:variable[name=a select=b]>div', { syntax: 'xsl' }), '<xsl:variable name="a">\n\t<div></div>\n</xsl:variable>'); }); it('custom profile', () => { equal(expand('img'), '<img src="" alt="">'); equal(expand('img', { options: { 'output.selfClosingStyle': 'xhtml' } }), '<img src="" alt="" />'); }); it('custom variables', () => { const variables = { charset: 'ru-RU' }; equal(expand('[charset=${charset}]{${charset}}'), '<div charset="UTF-8">UTF-8</div>'); equal(expand('[charset=${charset}]{${charset}}', { variables }), '<div charset="ru-RU">ru-RU</div>'); }); it('custom snippets', () => { const snippets = { link: 'link[foo=bar href]/', foo: '.foo[bar=baz]', repeat: 'div>ul>li{Hello World}*3' }; equal(expand('foo', { snippets }), '<div class="foo" bar="baz"></div>'); // `link:css` depends on `link` snippet so changing it will result in // altered `link:css` result equal(expand('link:css'), '<link rel="stylesheet" href="style.css">'); equal(expand('link:css', { snippets }), '<link foo="bar" href="style.css">'); // https://github.com/emmetio/emmet/issues/468 equal(expand('repeat', { snippets }), '<div>\n\t<ul>\n\t\t<li>Hello World</li>\n\t\t<li>Hello World</li>\n\t\t<li>Hello World</li>\n\t</ul>\n</div>'); }); it('formatter options', () => { equal(expand('ul>.item$*2'), '<ul>\n\t<li class="item1"></li>\n\t<li class="item2"></li>\n</ul>'); equal(expand('ul>.item$*2', { options: { 'comment.enabled': true } }), '<ul>\n\t<li class="item1"></li>\n\t<!-- /.item1 -->\n\t<li class="item2"></li>\n\t<!-- /.item2 -->\n</ul>'); equal(expand('div>p'), '<div>\n\t<p></p>\n</div>'); equal(expand('div>p', { options: { 'output.formatLeafNode': true } }), '<div>\n\t<p>\n\t\t\n\t</p>\n</div>'); }); it('JSX', () => { const config = { syntax: 'jsx' }; equal(expand('div#foo.bar', 
config), '<div id="foo" className="bar"></div>'); equal(expand('label[for=a]', config), '<label htmlFor="a"></label>'); equal(expand('Foo.Bar', config), '<Foo.Bar></Foo.Bar>'); equal(expand('div.{theme.style}', config), '<div className={theme.style}></div>'); }); it('override attributes', () => { const config = { syntax: 'jsx' }; equal(expand('.bar', config), '<div className="bar"></div>'); equal(expand('..bar', config), '<div styleName={styles.bar}></div>'); equal(expand('..foo-bar', config), '<div styleName={styles[\'foo-bar\']}></div>'); equal(expand('.foo', { syntax: 'vue' }), '<div class="foo"></div>'); equal(expand('..foo', { syntax: 'vue' }), '<div :class="foo"></div>'); }); it('wrap with abbreviation', () => { equal(expand('div>ul', { text: ['<div>line1</div>\n<div>line2</div>'] }), '<div>\n\t<ul>\n\t\t<div>line1</div>\n\t\t<div>line2</div>\n\t</ul>\n</div>'); equal(expand('p', { text: 'foo\nbar' }), '<p>\n\tfoo\n\tbar\n</p>'); equal(expand('p', { text: '<div>foo</div>' }), '<p>\n\t<div>foo</div>\n</p>'); equal(expand('p', { text: '<span>foo</span>' }), '<p><span>foo</span></p>'); equal(expand('p', { text: 'foo<span>foo</span>' }), '<p>foo<span>foo</span></p>'); equal(expand('p', { text: 'foo<div>foo</div>' }), '<p>foo<div>foo</div></p>'); }); it('wrap with abbreviation href', () => { equal(expand('a', { text: ['www.google.it'] }), '<a href="http://www.google.it">www.google.it</a>'); equal(expand('a', { text: ['then www.google.it'] }), '<a href="">then www.google.it</a>'); equal(expand('a', { text: ['www.google.it'], options: { 'markup.href': false } }), '<a href="">www.google.it</a>'); equal(expand('map[name="https://example.com"]', { text: ['some text'] }), '<map name="https://example.com">some text</map>'); equal(expand('map[href="https://example.com"]', { text: ['some text'] }), '<map name="" href="https://example.com">some text</map>'); equal(expand('map[name="https://example.com"]>b', { text: ['some text'] }), '<map name="https://example.com"><b>some 
text</b></map>'); equal(expand('a[href="https://example.com"]>b', { text: ['<u>some text false</u>'], options: { 'markup.href': false } }), '<a href="https://example.com"><b><u>some text false</u></b></a>'); equal(expand('a[href="https://example.com"]>b', { text: ['<u>some text true</u>'], options: { 'markup.href': true } }), '<a href="https://example.com"><b><u>some text true</u></b></a>'); equal(expand('a[href="https://example.com"]>div', { text: ['<p>some text false</p>'], options: { 'markup.href': false } }), '<a href="https://example.com">\n\t<div>\n\t\t<p>some text false</p>\n\t</div>\n</a>'); equal(expand('a[href="https://example.com"]>div', { text: ['<p>some text true</p>'], options: { 'markup.href': true } }), '<a href="https://example.com">\n\t<div>\n\t\t<p>some text true</p>\n\t</div>\n</a>'); }); // it.only('debug', () => { // equal(expand('link:css'), '<link rel="stylesheet" href="style.css">'); // }); }); describe('Pug templates', () => { const config = resolveConfig({ syntax: 'pug' }); it('basic', () => { equal(expand('!', config), 'doctype html\nhtml(lang="en")\n\thead\n\t\tmeta(charset="UTF-8")\n\t\tmeta(http-equiv="X-UA-Compatible", content="IE=edge")\n\t\tmeta(name="viewport", content="width=device-width, initial-scale=1.0")\n\t\ttitle Document\n\tbody '); }); }); }); <MSG> Removed caching in markup abbreviation parser Naive implementation causes side-effects like incorrect repeater and inserted child content <DFF> @@ -4,6 +4,7 @@ import expand, { resolveConfig } from '../src'; describe('Expand Abbreviation', () => { describe('Markup', () => { it('basic', () => { + equal(expand('input[value="text$"]*2'), '<input type="text" value="text1"><input type="text" value="text2">'); equal(expand('ul>.item$*2'), '<ul>\n\t<li class="item1"></li>\n\t<li class="item2"></li>\n</ul>'); // insert text into abbreviation
1
Removed caching in markup abbreviation parser
0
.ts
ts
mit
emmetio/emmet
10070817
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. 
#### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways:

```ruby
ab_test(:homepage_design, {'Old' => 18}, {'New' => 2})
ab_test(:homepage_design, 'Old', {'New' => 1.0/9})
ab_test(:homepage_design, {'Old' => 9}, 'New')
```

This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1.

### Overriding alternatives

For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url.

If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as:

    http://myawesomesite.com?ab_test[button_color]=red

will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option.

In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter.

    http://myawesomesite.com?SPLIT_DISABLE=true

It is not required to send `SPLIT_DISABLE=false` to activate Split.

### Rspec Helper

To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below:

```ruby
# Create a file with these contents at 'spec/support/split_helper.rb'
# and ensure it is `require`d in your rails_helper.rb or spec_helper.rb
module SplitHelper

  # Force a specific experiment alternative to always be returned:
  #   use_ab_test(signup_form: "single_page")
  #
  # Force alternatives for multiple experiments:
  #   use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices")
  #
  def use_ab_test(alternatives_by_experiment)
    allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block|
      variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" }
      block.call(variant) unless block.nil?
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once

By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests.

To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = true
end
```

This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another.

To address this, set the `allow_multiple_experiments` config option to 'control' like so:

```ruby
Split.configure do |config|
  config.allow_multiple_experiments = 'control'
end
```

For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment.

### Experiment Persistence

Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment.

By default Split will store the tests for each user in the session.

You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing.

#### Cookies

```ruby
Split.configure do |config|
  config.persistence = :cookie
end
```

When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds).

```ruby
Split.configure do |config|
  config.persistence_cookie_length = 2592000 # 30 days
end
```

__Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API

#### Redis

Using Redis will allow ab_users to persist across sessions or machines.

```ruby
Split.configure do |config|
  config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id })
  # Equivalent
  # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id)
end
```

Options:
* `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration)
* `namespace`: separate namespace to store these persisted values (default "persistence")
* `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments)

#### Dual Adapter

The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users.

```ruby
cookie_adapter = Split::Persistence::CookieAdapter
redis_adapter = Split::Persistence::RedisAdapter.with_config(
    lookup_by: -> (context) { context.send(:current_user).try(:id) },
    expire_seconds: 2592000)

Split.configure do |config|
  config.persistence = Split::Persistence::DualAdapter.with_config(
      logged_in: -> (context) { !context.send(:current_user).try(:id).nil? },
      logged_in_adapter: redis_adapter,
      logged_out_adapter: cookie_adapter)
  config.persistence_cookie_length = 2592000 # 30 days
end
```

#### Custom Adapter

Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point.
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? 
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`:

```yml
development: redis://localhost:6379
test: redis://localhost:6379
staging: redis://redis1.example.com:6379
fi: redis://localhost:6379
production: redis://redis1.example.com:6379
```

And our initializer:

```ruby
split_config = YAML.load_file(Rails.root.join('config', 'split.yml'))
Split.redis = split_config[Rails.env]
```

### Redis Caching (v4.0+)

In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load.

```ruby
Split.configuration.cache = true
```

This currently caches:
- `Split::ExperimentCatalog.find`
- `Split::Experiment.start_time`
- `Split::Experiment.winner`

## Namespaces

If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients.

This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following:

1. Add `redis-namespace` to your Gemfile:

```ruby
gem 'redis-namespace'
```

2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer):

```ruby
redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want
Split.redis = Redis::Namespace.new(:your_namespace, redis: redis)
```

## Outside of a Web Session

Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session.

```ruby
# create a new experiment
experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue')

# create a new trial
trial = Split::Trial.new(:experiment => experiment)

# run trial
trial.choose!
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). <MSG> Merge pull request #634 from splitrb/add-documentation-on-cookie-storage Adding documentation related to what is stored on cookies. 
<DFF> @@ -263,7 +263,7 @@ Split.configure do |config| end ``` -By default, cookies will expire in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). +When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| @@ -272,6 +272,8 @@ Split.configure do |config| end ``` +The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } + __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis
3
Merge pull request #634 from splitrb/add-documentation-on-cookie-storage
1
.md
md
mit
splitrb/split
10070818
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. 
#### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? 
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies end ``` By default, cookies will expire in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). end ``` __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? 
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ``` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! 
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan Bates has produced an excellent 10 minute screencast about Split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). <MSG> Merge pull request #634 from splitrb/add-documentation-on-cookie-storage Adding documentation related to what is stored on cookies. 
<DFF> @@ -263,7 +263,7 @@ Split.configure do |config| end ``` -By default, cookies will expire in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). +When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| @@ -272,6 +272,8 @@ Split.configure do |config| end ``` +The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } + __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis
3
Merge pull request #634 from splitrb/add-documentation-on-cookie-storage
1
.md
md
mit
splitrb/split
10070819
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions. You can try v3.0 or v0.8.0(for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` You now have a Redis daemon running on port `6379`. ### Setup ```bash gem install split ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. 
#### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. 
This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? 
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies end ``` By default, cookies will expire in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. 
To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). end ``` __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? 
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ```` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! 
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). <MSG> Merge pull request #634 from splitrb/add-documentation-on-cookie-storage Adding documentation related to what is stored on cookies. 
<DFF> @@ -263,7 +263,7 @@ Split.configure do |config| end ``` -By default, cookies will expire in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). +When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). ```ruby Split.configure do |config| @@ -272,6 +272,8 @@ Split.configure do |config| end ``` +The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } + __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis
3
Merge pull request #634 from splitrb/add-documentation-on-cookie-storage
1
.md
md
mit
splitrb/split
10070820
<NME> jquery.meow.js <BEF> (function ($, window) { 'use strict'; // Meow queue var default_meow_area, meows = { queue: {}, add: function (meow) { this.queue[meow.timestamp] = meow; }, get: function (timestamp) { return this.queue[timestamp]; }, remove: function (timestamp) { delete this.queue[timestamp]; }, size: function () { var timestamp, size = 0; for (timestamp in this.queue) { if (this.queue.hasOwnProperty(timestamp)) { size += 1; } } return size; } }, // Meow constructor Meow = function (options) { var that = this; this.timestamp = new Date().getTime(); // used to identify this meow and timeout this.hovered = false; // whether mouse is over or not if (typeof default_meow_area === 'undefined' && typeof options.container === 'undefined') { default_meow_area = $(window.document.createElement('div')) .attr({'id': ((new Date()).getTime()), 'class': 'meows'}); $('body').prepend(default_meow_area); } if (meows.size() <= 0) { if (typeof options.beforeCreateFirst === 'function') { options.beforeCreateFirst.call(that); } } if (typeof options.container === 'string') { this.container = $(options.container); } else { this.container = default_meow_area; } if (typeof options.title === 'string') { this.title = options.title; } if (typeof options.message === 'string') { this.message = options.message; } else if (options.message instanceof jQuery) { if (options.message.is('input,textarea,select')) { this.message = options.message.val(); } else { this.message = options.message.text(); } if (typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') { this.title = options.message.attr('title'); } } if (typeof options.icon === 'string') { this.icon = options.icon; } if (options.sticky) { this.duration = Infinity; } else { this.duration = options.duration || 5000; } // Call callback if it's defined (this = meow object) if (typeof options.beforeCreate === 'function') { options.beforeCreate.call(that); } // Add the meow to the meow area 
this.container.append($(window.document.createElement('div')) .attr('id', 'meow-' + this.timestamp.toString()) .addClass('meow') .html($(window.document.createElement('div')).addClass('inner').html(this.message)) .hide() .fadeIn(400)); this.manifest = $('#meow-' + this.timestamp.toString()); // Add title if it's defined if (typeof this.title === 'string') { this.manifest.find('.inner').prepend( $(window.document.createElement('h1')).text(this.title) ); } // Add icon if it's defined if (typeof that.icon === 'string') { this.manifest.find('.inner').prepend( $(window.document.createElement('div')).addClass('icon').html( $(window.document.createElement('img')).attr('src', this.icon) ) ); } // Add close button if the meow isn't uncloseable // TODO: this close button needs to be much prettier if (options.closeable !== false) { this.manifest.find('.inner').prepend( $(window.document.createElement('a')) .addClass('close') .html('&times;') .attr('href', '#close-meow-' + that.timestamp) .click(function (e) { e.preventDefault(); that.destroy(); }) ); } this.manifest.bind('mouseenter mouseleave', function (event) { if (event.type === 'mouseleave') { that.hovered = false; that.manifest.removeClass('hover'); // Destroy the mow on mouseleave if it's timed out if (that.timestamp + that.duration <= new Date().getTime()) { that.destroy(); } } else { that.hovered = true; that.manifest.addClass('hover'); } }); // Add a timeout if the duration isn't Infinity if (this.duration !== Infinity) { this.timeout = window.setTimeout(function () { // Make sure this meow hasn't already been destroyed if (typeof meows.get(that.timestamp) !== 'undefined') { // Call callback if it's defined (this = meow DOM element) if (typeof options.onTimeout === 'function') { options.onTimeout.call(that.manifest); } // Don't destroy if user is hovering over meow if (that.hovered !== true && typeof that === 'object') { that.destroy(); } } }, that.duration); } this.destroy = function () { if (that.destroyed !== 
true) { // Call callback if it's defined (this = meow DOM element) if (typeof options.beforeDestroy === 'function') { options.beforeDestroy.call(that.manifest); } that.manifest.find('.inner').fadeTo(400, 0, function () { that.manifest.slideUp(function () { that.manifest.remove(); that.destroyed = true; meows.remove(that.timestamp); if (typeof options.afterDestroy === 'function') { options.afterDestroy.call(null); } if (meows.size() <= 0) { if (default_meow_area instanceof $) { default_meow_area.remove(); default_meow_area = undefined; } if (typeof options.afterDestroyLast === 'function') { options.afterDestroyLast.call(null); } } }); }); } }; }; $.fn.meow = function (args) { var meow = new Meow(args); meows.add(meow); return meow; }; $.meow = $.fn.meow; }(jQuery, window)); <MSG> Consistently use my own passed-in $ <DFF> @@ -55,7 +55,7 @@ if (typeof options.message === 'string') { this.message = options.message; - } else if (options.message instanceof jQuery) { + } else if (options.message instanceof $) { if (options.message.is('input,textarea,select')) { this.message = options.message.val(); } else {
1
Consistently use my own passed-in $
1
.js
meow
mit
zacstewart/Meow
10070821
<NME> jquery.meow.js <BEF> (function ($, window) { 'use strict'; // Meow queue var default_meow_area, meows = { queue: {}, add: function (meow) { this.queue[meow.timestamp] = meow; }, get: function (timestamp) { return this.queue[timestamp]; }, remove: function (timestamp) { delete this.queue[timestamp]; }, size: function () { var timestamp, size = 0; for (timestamp in this.queue) { if (this.queue.hasOwnProperty(timestamp)) { size += 1; } } return size; } }, // Meow constructor Meow = function (options) { var that = this; this.timestamp = new Date().getTime(); // used to identify this meow and timeout this.hovered = false; // whether mouse is over or not if (typeof default_meow_area === 'undefined' && typeof options.container === 'undefined') { default_meow_area = $(window.document.createElement('div')) .attr({'id': ((new Date()).getTime()), 'class': 'meows'}); $('body').prepend(default_meow_area); } if (meows.size() <= 0) { if (typeof options.beforeCreateFirst === 'function') { options.beforeCreateFirst.call(that); } } if (typeof options.container === 'string') { this.container = $(options.container); } else { this.container = default_meow_area; } if (typeof options.title === 'string') { this.title = options.title; } if (typeof options.message === 'string') { this.message = options.message; } else if (options.message instanceof jQuery) { if (options.message.is('input,textarea,select')) { this.message = options.message.val(); } else { this.message = options.message.text(); } if (typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') { this.title = options.message.attr('title'); } } if (typeof options.icon === 'string') { this.icon = options.icon; } if (options.sticky) { this.duration = Infinity; } else { this.duration = options.duration || 5000; } // Call callback if it's defined (this = meow object) if (typeof options.beforeCreate === 'function') { options.beforeCreate.call(that); } // Add the meow to the meow area 
this.container.append($(window.document.createElement('div')) .attr('id', 'meow-' + this.timestamp.toString()) .addClass('meow') .html($(window.document.createElement('div')).addClass('inner').html(this.message)) .hide() .fadeIn(400)); this.manifest = $('#meow-' + this.timestamp.toString()); // Add title if it's defined if (typeof this.title === 'string') { this.manifest.find('.inner').prepend( $(window.document.createElement('h1')).text(this.title) ); } // Add icon if it's defined if (typeof that.icon === 'string') { this.manifest.find('.inner').prepend( $(window.document.createElement('div')).addClass('icon').html( $(window.document.createElement('img')).attr('src', this.icon) ) ); } // Add close button if the meow isn't uncloseable // TODO: this close button needs to be much prettier if (options.closeable !== false) { this.manifest.find('.inner').prepend( $(window.document.createElement('a')) .addClass('close') .html('&times;') .attr('href', '#close-meow-' + that.timestamp) .click(function (e) { e.preventDefault(); that.destroy(); }) ); } this.manifest.bind('mouseenter mouseleave', function (event) { if (event.type === 'mouseleave') { that.hovered = false; that.manifest.removeClass('hover'); // Destroy the mow on mouseleave if it's timed out if (that.timestamp + that.duration <= new Date().getTime()) { that.destroy(); } } else { that.hovered = true; that.manifest.addClass('hover'); } }); // Add a timeout if the duration isn't Infinity if (this.duration !== Infinity) { this.timeout = window.setTimeout(function () { // Make sure this meow hasn't already been destroyed if (typeof meows.get(that.timestamp) !== 'undefined') { // Call callback if it's defined (this = meow DOM element) if (typeof options.onTimeout === 'function') { options.onTimeout.call(that.manifest); } // Don't destroy if user is hovering over meow if (that.hovered !== true && typeof that === 'object') { that.destroy(); } } }, that.duration); } this.destroy = function () { if (that.destroyed !== 
true) { // Call callback if it's defined (this = meow DOM element) if (typeof options.beforeDestroy === 'function') { options.beforeDestroy.call(that.manifest); } that.manifest.find('.inner').fadeTo(400, 0, function () { that.manifest.slideUp(function () { that.manifest.remove(); that.destroyed = true; meows.remove(that.timestamp); if (typeof options.afterDestroy === 'function') { options.afterDestroy.call(null); } if (meows.size() <= 0) { if (default_meow_area instanceof $) { default_meow_area.remove(); default_meow_area = undefined; } if (typeof options.afterDestroyLast === 'function') { options.afterDestroyLast.call(null); } } }); }); } }; }; $.fn.meow = function (args) { var meow = new Meow(args); meows.add(meow); return meow; }; $.meow = $.fn.meow; }(jQuery, window)); <MSG> Consistently use my own passed-in $ <DFF> @@ -55,7 +55,7 @@ if (typeof options.message === 'string') { this.message = options.message; - } else if (options.message instanceof jQuery) { + } else if (options.message instanceof $) { if (options.message.is('input,textarea,select')) { this.message = options.message.val(); } else {
1
Consistently use my own passed-in $
1
.js
meow
mit
zacstewart/Meow
10070822
<NME> experiment_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "time" describe Split::Experiment do def new_experiment(goals = []) Split::Experiment.new("link_color", alternatives: ["blue", "red", "green"], goals: goals) end def alternative(color) Split::Alternative.new(color, "link_color") end let(:experiment) { new_experiment } let(:blue) { alternative("blue") } let(:green) { alternative("green") } context "with an experiment" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"]) } it "should have a name" do expect(experiment.name).to eq("basket_text") end it "should have alternatives" do expect(experiment.alternatives.length).to be 2 end it "should have alternatives with correct names" do expect(experiment.alternatives.collect { |a| a.name }).to eq(["Basket", "Cart"]) end it "should be resettable by default" do expect(experiment.resettable).to be_truthy end it "should save to redis" do experiment.save expect(Split.redis.exists?("basket_text")).to be true end it "should save the start time to redis" do experiment_start_time = Time.at(1372167761) expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should not save the start time to redis when start_manually is enabled" do expect(Split.configuration).to receive(:start_manually).and_return(true) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should save the selected algorithm to redis" do experiment_algorithm = Split::Algorithms::Whiplash experiment.algorithm = experiment_algorithm experiment.save expect(Split::ExperimentCatalog.find("basket_text").algorithm).to eq(experiment_algorithm) end it "should handle having a start time stored as a string" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to 
receive(:now).twice.and_return(experiment_start_time) experiment.save Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s) expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should handle not having a start time" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save Split.redis.hdel(:experiment_start_times, experiment.name) expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should not create duplicates when saving multiple times" do experiment.save experiment.save expect(Split.redis.exists?("basket_text")).to be true expect(Split.redis.lrange("basket_text", 0, -1)).to eq(['{"Basket":1}', '{"Cart":1}']) end describe "new record?" do it "should know if it hasn't been saved yet" do expect(experiment.new_record?).to be_truthy end it "should know if it has been saved yet" do experiment.save expect(experiment.new_record?).to be_falsey end end describe "control" do it "should be the first alternative" do experiment.save expect(experiment.control.name).to eq("Basket") end end end describe "initialization" do it "should set the algorithm when passed as an option to the initializer" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end it "should be possible to make an experiment not resettable" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) expect(experiment.resettable).to be_falsey end context "from configuration" do let(:experiment_name) { :my_experiment } let(:experiments) do { experiment_name => { alternatives: ["Control Opt", "Alt one"] } } end before { Split.configuration.experiments = experiments } it "assigns default values to the experiment" do 
expect(Split::Experiment.new(experiment_name).resettable).to eq(true) end end end describe "persistent configuration" do it "should persist resettable in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.resettable).to be_falsey end describe "#metadata" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash, metadata: meta) } let(:meta) { { a: "b" } } before do experiment.save end it "should delete the key when metadata is removed" do experiment.metadata = nil experiment.save expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey end context "simple hash" do let(:meta) { { "basket" => "a", "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end context "nested hash" do let(:meta) { { "basket" => { "one" => "two" }, "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end end it "should persist algorithm in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.algorithm).to eq(Split::Algorithms::Whiplash) end it "should persist a new experiment in redis, that does not exist in the configuration file" do experiment = Split::Experiment.new("foobar", alternatives: ["tra", "la"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("foobar") expect(e).to eq(experiment) expect(e.alternatives.collect { |a| a.name }).to eq(["tra", "la"]) end end describe "deleting" do it 
"should delete itself" do experiment = Split::Experiment.new("basket_text", alternatives: [ "Basket", "Cart"]) experiment.save experiment.delete expect(Split.redis.exists?("link_color")).to be false expect(Split::ExperimentCatalog.find("link_color")).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.delete expect(experiment.version).to eq(1) end it "should call the on_experiment_delete hook" do expect(Split.configuration.on_experiment_delete).to receive(:call) experiment.delete end end it "should use the specified algorithm if a winner does not exist" do Split.configuration.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color')) experiment.next_alternative.name.should eql('green') end end experiment.start experiment.delete expect(experiment.start_time).to be_nil end it "should default cohorting back to false" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq(true) experiment.delete expect(experiment.cohorting_disabled?).to eq(false) end end describe "winner" do it "should have no winner initially" do expect(experiment.winner).to be_nil end end describe "winner=" do it "should allow you to specify a winner" do experiment.save experiment.winner = "red" expect(experiment.winner.name).to eq("red") end it "should call the on_experiment_winner_choose hook" do expect(Split.configuration.on_experiment_winner_choose).to receive(:call) experiment.winner = "green" end context "when has_winner state is memoized" do before { expect(experiment).to_not have_winner } it "should keep has_winner state consistent" do experiment.winner = "red" expect(experiment).to have_winner end end end describe "reset_winner" do before { experiment.winner = "green" } it "should reset the winner" do experiment.reset_winner expect(experiment.winner).to be_nil end context "when has_winner state is memoized" do before { expect(experiment).to have_winner } it "should keep has_winner 
state consistent" do experiment.reset_winner expect(experiment).to_not have_winner end end end describe "has_winner?" do context "with winner" do before { experiment.winner = "red" } it "returns true" do expect(experiment).to have_winner end end context "without winner" do it "returns false" do expect(experiment).to_not have_winner end end it "memoizes has_winner state" do expect(experiment).to receive(:winner).once expect(experiment).to_not have_winner expect(experiment).to_not have_winner end end describe "reset" do let(:reset_manually) { false } before do allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) experiment.save green.increment_participation green.increment_participation end it "should reset all alternatives" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end it "should reset the winner" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(experiment.winner).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.reset expect(experiment.version).to eq(1) end it "should call the on_experiment_reset hook" do expect(Split.configuration.on_experiment_reset).to receive(:call) experiment.reset end it "should call the on_before_experiment_reset hook" do expect(Split.configuration.on_before_experiment_reset).to receive(:call) experiment.reset end end describe "algorithm" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } it "should use the default algorithm if none is specified" do expect(experiment.algorithm).to eq(Split.configuration.algorithm) end it "should use the user specified algorithm for this experiment if specified" do experiment.algorithm = Split::Algorithms::Whiplash 
expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end end describe "#next_alternative" do context "with multiple alternatives" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } context "with winner" do it "should always return the winner" do green = Split::Alternative.new("green", "link_color") experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation expect(experiment.next_alternative.name).to eq("green") end end context "without winner" do it "should use the specified algorithm" do experiment.algorithm = Split::Algorithms::Whiplash expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new("green", "link_color")) expect(experiment.next_alternative.name).to eq("green") end end end context "with single alternative" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue") } it "should always return the only alternative" do expect(experiment.next_alternative.name).to eq("blue") expect(experiment.next_alternative.name).to eq("blue") end end end describe "#cohorting_disabled?" 
do it "returns false when nothing has been configured" do expect(experiment.cohorting_disabled?).to eq false end it "returns true when enable_cohorting is performed" do experiment.enable_cohorting expect(experiment.cohorting_disabled?).to eq false end it "returns false when nothing has been configured" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq true end end describe "changing an existing experiment" do def same_but_different_alternative Split::ExperimentCatalog.find_or_create("link_color", "blue", "yellow", "orange") end it "should reset an experiment if it is loaded with different alternatives" do experiment.save blue.participant_count = 5 same_experiment = same_but_different_alternative expect(same_experiment.alternatives.map(&:name)).to eq(["blue", "yellow", "orange"]) expect(blue.participant_count).to eq(0) end it "should only reset once" do experiment.save expect(experiment.version).to eq(0) same_experiment = same_but_different_alternative expect(same_experiment.version).to eq(1) same_experiment_again = same_but_different_alternative expect(same_experiment_again.version).to eq(1) end context "when metadata is changed" do it "should increase version" do experiment.save experiment.metadata = { "foo" => "bar" } expect { experiment.save }.to change { experiment.version }.by(1) end it "does not increase version" do experiment.metadata = nil experiment.save expect { experiment.save }.to change { experiment.version }.by(0) end end context "when experiment configuration is changed" do let(:reset_manually) { false } before do experiment.save allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) green.increment_participation green.increment_participation experiment.set_alternatives_and_options(alternatives: %w(blue red green zip), goals: %w(purchase)) experiment.save end it "resets all alternatives" do expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end context "when 
reset_manually is set" do let(:reset_manually) { true } it "does not reset alternatives" do expect(green.participant_count).to eq(2) expect(green.completed_count).to eq(0) end end end end describe "alternatives passed as non-strings" do it "should throw an exception if an alternative is passed that is not a string" do expect { Split::ExperimentCatalog.find_or_create("link_color", :blue, :red) }.to raise_error(ArgumentError) expect { Split::ExperimentCatalog.find_or_create("link_enabled", true, false) }.to raise_error(ArgumentError) end end describe "specifying weights" do let(:experiment_with_weight) { Split::ExperimentCatalog.find_or_create("link_color", { "blue" => 1 }, { "red" => 2 }) } it "should work for a new experiment" do expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end it "should work for an existing experiment" do experiment.save expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end end describe "specifying goals" do let(:experiment) { new_experiment(["purchase"]) } context "saving experiment" do let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ "link_color" => ["purchase", "refund"] }, "blue", "red", "green") } before { experiment.save } it "can find existing experiment" do expect(Split::ExperimentCatalog.find("link_color").name).to eq("link_color") end it "should reset an experiment if it is loaded with different goals" do same_but_different_goals expect(Split::ExperimentCatalog.find("link_color").goals).to eq(["purchase", "refund"]) end end it "should have goals" do expect(experiment.goals).to eq(["purchase"]) end context "find or create experiment" do it "should have correct goals" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.goals).to eq(["purchase", "refund"]) experiment = Split::ExperimentCatalog.find_or_create("link_color3", "blue", "red", "green") expect(experiment.goals).to 
eq([]) end end end describe "beta probability calculation" do it "should return a hash with the probability of each alternative being the best" do experiment = Split::ExperimentCatalog.find_or_create("mathematicians", "bernoulli", "poisson", "lagrange") experiment.calc_winning_alternatives expect(experiment.alternative_probabilities).not_to be_nil end it "should return between 46% and 54% probability for an experiment with 2 alternatives and no data" do experiment = Split::ExperimentCatalog.find_or_create("scientists", "einstein", "bohr") experiment.calc_winning_alternatives expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50) end it "should calculate the probability of being the winning alternative separately for each goal", skip: true do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") goal1 = experiment.goals[0] goal2 = experiment.goals[1] experiment.alternatives.each do |alternative| alternative.participant_count = 50 alternative.set_completed_count(10, goal1) alternative.set_completed_count(15+rand(30), goal2) end experiment.calc_winning_alternatives alt = experiment.alternatives[0] p_goal1 = alt.p_winner(goal1) p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end it "should return nil and not re-calculate probabilities if they have already been calculated today" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.calc_winning_alternatives).not_to be nil expect(experiment.calc_winning_alternatives).to be nil end end end <MSG> Akriti/Sumedha - Now random_alternative method in experiment class uses the specified algorithm and not the default one. 
<DFF> @@ -225,7 +225,8 @@ describe Split::Experiment do end it "should use the specified algorithm if a winner does not exist" do - Split.configuration.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color')) + experiment.algorithm = Split::Algorithms::Whiplash + experiment.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color')) experiment.next_alternative.name.should eql('green') end end
2
Akriti/Sumedha - Now random_alternative method in experiment class uses the specified algorithm and not the default one.
1
.rb
rb
mit
splitrb/split
10070823
<NME> experiment_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "time" describe Split::Experiment do def new_experiment(goals = []) Split::Experiment.new("link_color", alternatives: ["blue", "red", "green"], goals: goals) end def alternative(color) Split::Alternative.new(color, "link_color") end let(:experiment) { new_experiment } let(:blue) { alternative("blue") } let(:green) { alternative("green") } context "with an experiment" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"]) } it "should have a name" do expect(experiment.name).to eq("basket_text") end it "should have alternatives" do expect(experiment.alternatives.length).to be 2 end it "should have alternatives with correct names" do expect(experiment.alternatives.collect { |a| a.name }).to eq(["Basket", "Cart"]) end it "should be resettable by default" do expect(experiment.resettable).to be_truthy end it "should save to redis" do experiment.save expect(Split.redis.exists?("basket_text")).to be true end it "should save the start time to redis" do experiment_start_time = Time.at(1372167761) expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should not save the start time to redis when start_manually is enabled" do expect(Split.configuration).to receive(:start_manually).and_return(true) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should save the selected algorithm to redis" do experiment_algorithm = Split::Algorithms::Whiplash experiment.algorithm = experiment_algorithm experiment.save expect(Split::ExperimentCatalog.find("basket_text").algorithm).to eq(experiment_algorithm) end it "should handle having a start time stored as a string" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to 
receive(:now).twice.and_return(experiment_start_time) experiment.save Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s) expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should handle not having a start time" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save Split.redis.hdel(:experiment_start_times, experiment.name) expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should not create duplicates when saving multiple times" do experiment.save experiment.save expect(Split.redis.exists?("basket_text")).to be true expect(Split.redis.lrange("basket_text", 0, -1)).to eq(['{"Basket":1}', '{"Cart":1}']) end describe "new record?" do it "should know if it hasn't been saved yet" do expect(experiment.new_record?).to be_truthy end it "should know if it has been saved yet" do experiment.save expect(experiment.new_record?).to be_falsey end end describe "control" do it "should be the first alternative" do experiment.save expect(experiment.control.name).to eq("Basket") end end end describe "initialization" do it "should set the algorithm when passed as an option to the initializer" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end it "should be possible to make an experiment not resettable" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) expect(experiment.resettable).to be_falsey end context "from configuration" do let(:experiment_name) { :my_experiment } let(:experiments) do { experiment_name => { alternatives: ["Control Opt", "Alt one"] } } end before { Split.configuration.experiments = experiments } it "assigns default values to the experiment" do 
expect(Split::Experiment.new(experiment_name).resettable).to eq(true) end end end describe "persistent configuration" do it "should persist resettable in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.resettable).to be_falsey end describe "#metadata" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash, metadata: meta) } let(:meta) { { a: "b" } } before do experiment.save end it "should delete the key when metadata is removed" do experiment.metadata = nil experiment.save expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey end context "simple hash" do let(:meta) { { "basket" => "a", "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end context "nested hash" do let(:meta) { { "basket" => { "one" => "two" }, "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end end it "should persist algorithm in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.algorithm).to eq(Split::Algorithms::Whiplash) end it "should persist a new experiment in redis, that does not exist in the configuration file" do experiment = Split::Experiment.new("foobar", alternatives: ["tra", "la"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("foobar") expect(e).to eq(experiment) expect(e.alternatives.collect { |a| a.name }).to eq(["tra", "la"]) end end describe "deleting" do it 
"should delete itself" do experiment = Split::Experiment.new("basket_text", alternatives: [ "Basket", "Cart"]) experiment.save experiment.delete expect(Split.redis.exists?("link_color")).to be false expect(Split::ExperimentCatalog.find("link_color")).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.delete expect(experiment.version).to eq(1) end it "should call the on_experiment_delete hook" do expect(Split.configuration.on_experiment_delete).to receive(:call) experiment.delete end end it "should use the specified algorithm if a winner does not exist" do Split.configuration.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color')) experiment.next_alternative.name.should eql('green') end end experiment.start experiment.delete expect(experiment.start_time).to be_nil end it "should default cohorting back to false" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq(true) experiment.delete expect(experiment.cohorting_disabled?).to eq(false) end end describe "winner" do it "should have no winner initially" do expect(experiment.winner).to be_nil end end describe "winner=" do it "should allow you to specify a winner" do experiment.save experiment.winner = "red" expect(experiment.winner.name).to eq("red") end it "should call the on_experiment_winner_choose hook" do expect(Split.configuration.on_experiment_winner_choose).to receive(:call) experiment.winner = "green" end context "when has_winner state is memoized" do before { expect(experiment).to_not have_winner } it "should keep has_winner state consistent" do experiment.winner = "red" expect(experiment).to have_winner end end end describe "reset_winner" do before { experiment.winner = "green" } it "should reset the winner" do experiment.reset_winner expect(experiment.winner).to be_nil end context "when has_winner state is memoized" do before { expect(experiment).to have_winner } it "should keep has_winner 
state consistent" do experiment.reset_winner expect(experiment).to_not have_winner end end end describe "has_winner?" do context "with winner" do before { experiment.winner = "red" } it "returns true" do expect(experiment).to have_winner end end context "without winner" do it "returns false" do expect(experiment).to_not have_winner end end it "memoizes has_winner state" do expect(experiment).to receive(:winner).once expect(experiment).to_not have_winner expect(experiment).to_not have_winner end end describe "reset" do let(:reset_manually) { false } before do allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) experiment.save green.increment_participation green.increment_participation end it "should reset all alternatives" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end it "should reset the winner" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(experiment.winner).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.reset expect(experiment.version).to eq(1) end it "should call the on_experiment_reset hook" do expect(Split.configuration.on_experiment_reset).to receive(:call) experiment.reset end it "should call the on_before_experiment_reset hook" do expect(Split.configuration.on_before_experiment_reset).to receive(:call) experiment.reset end end describe "algorithm" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } it "should use the default algorithm if none is specified" do expect(experiment.algorithm).to eq(Split.configuration.algorithm) end it "should use the user specified algorithm for this experiment if specified" do experiment.algorithm = Split::Algorithms::Whiplash 
expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end end describe "#next_alternative" do context "with multiple alternatives" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } context "with winner" do it "should always return the winner" do green = Split::Alternative.new("green", "link_color") experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation expect(experiment.next_alternative.name).to eq("green") end end context "without winner" do it "should use the specified algorithm" do experiment.algorithm = Split::Algorithms::Whiplash expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new("green", "link_color")) expect(experiment.next_alternative.name).to eq("green") end end end context "with single alternative" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue") } it "should always return the only alternative" do expect(experiment.next_alternative.name).to eq("blue") expect(experiment.next_alternative.name).to eq("blue") end end end describe "#cohorting_disabled?" 
do it "returns false when nothing has been configured" do expect(experiment.cohorting_disabled?).to eq false end it "returns true when enable_cohorting is performed" do experiment.enable_cohorting expect(experiment.cohorting_disabled?).to eq false end it "returns false when nothing has been configured" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq true end end describe "changing an existing experiment" do def same_but_different_alternative Split::ExperimentCatalog.find_or_create("link_color", "blue", "yellow", "orange") end it "should reset an experiment if it is loaded with different alternatives" do experiment.save blue.participant_count = 5 same_experiment = same_but_different_alternative expect(same_experiment.alternatives.map(&:name)).to eq(["blue", "yellow", "orange"]) expect(blue.participant_count).to eq(0) end it "should only reset once" do experiment.save expect(experiment.version).to eq(0) same_experiment = same_but_different_alternative expect(same_experiment.version).to eq(1) same_experiment_again = same_but_different_alternative expect(same_experiment_again.version).to eq(1) end context "when metadata is changed" do it "should increase version" do experiment.save experiment.metadata = { "foo" => "bar" } expect { experiment.save }.to change { experiment.version }.by(1) end it "does not increase version" do experiment.metadata = nil experiment.save expect { experiment.save }.to change { experiment.version }.by(0) end end context "when experiment configuration is changed" do let(:reset_manually) { false } before do experiment.save allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) green.increment_participation green.increment_participation experiment.set_alternatives_and_options(alternatives: %w(blue red green zip), goals: %w(purchase)) experiment.save end it "resets all alternatives" do expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end context "when 
reset_manually is set" do let(:reset_manually) { true } it "does not reset alternatives" do expect(green.participant_count).to eq(2) expect(green.completed_count).to eq(0) end end end end describe "alternatives passed as non-strings" do it "should throw an exception if an alternative is passed that is not a string" do expect { Split::ExperimentCatalog.find_or_create("link_color", :blue, :red) }.to raise_error(ArgumentError) expect { Split::ExperimentCatalog.find_or_create("link_enabled", true, false) }.to raise_error(ArgumentError) end end describe "specifying weights" do let(:experiment_with_weight) { Split::ExperimentCatalog.find_or_create("link_color", { "blue" => 1 }, { "red" => 2 }) } it "should work for a new experiment" do expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end it "should work for an existing experiment" do experiment.save expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end end describe "specifying goals" do let(:experiment) { new_experiment(["purchase"]) } context "saving experiment" do let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ "link_color" => ["purchase", "refund"] }, "blue", "red", "green") } before { experiment.save } it "can find existing experiment" do expect(Split::ExperimentCatalog.find("link_color").name).to eq("link_color") end it "should reset an experiment if it is loaded with different goals" do same_but_different_goals expect(Split::ExperimentCatalog.find("link_color").goals).to eq(["purchase", "refund"]) end end it "should have goals" do expect(experiment.goals).to eq(["purchase"]) end context "find or create experiment" do it "should have correct goals" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.goals).to eq(["purchase", "refund"]) experiment = Split::ExperimentCatalog.find_or_create("link_color3", "blue", "red", "green") expect(experiment.goals).to 
eq([]) end end end describe "beta probability calculation" do it "should return a hash with the probability of each alternative being the best" do experiment = Split::ExperimentCatalog.find_or_create("mathematicians", "bernoulli", "poisson", "lagrange") experiment.calc_winning_alternatives expect(experiment.alternative_probabilities).not_to be_nil end it "should return between 46% and 54% probability for an experiment with 2 alternatives and no data" do experiment = Split::ExperimentCatalog.find_or_create("scientists", "einstein", "bohr") experiment.calc_winning_alternatives expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50) end it "should calculate the probability of being the winning alternative separately for each goal", skip: true do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") goal1 = experiment.goals[0] goal2 = experiment.goals[1] experiment.alternatives.each do |alternative| alternative.participant_count = 50 alternative.set_completed_count(10, goal1) alternative.set_completed_count(15+rand(30), goal2) end experiment.calc_winning_alternatives alt = experiment.alternatives[0] p_goal1 = alt.p_winner(goal1) p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end it "should return nil and not re-calculate probabilities if they have already been calculated today" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.calc_winning_alternatives).not_to be nil expect(experiment.calc_winning_alternatives).to be nil end end end <MSG> Akriti/Sumedha - Now random_alternative method in experiment class uses the specified algorithm and not the default one. 
<DFF> @@ -225,7 +225,8 @@ describe Split::Experiment do end it "should use the specified algorithm if a winner does not exist" do - Split.configuration.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color')) + experiment.algorithm = Split::Algorithms::Whiplash + experiment.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color')) experiment.next_alternative.name.should eql('green') end end
2
Akriti/Sumedha - Now random_alternative method in experiment class uses the specified algorithm and not the default one.
1
.rb
rb
mit
splitrb/split
10070824
<NME> experiment_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "time" describe Split::Experiment do def new_experiment(goals = []) Split::Experiment.new("link_color", alternatives: ["blue", "red", "green"], goals: goals) end def alternative(color) Split::Alternative.new(color, "link_color") end let(:experiment) { new_experiment } let(:blue) { alternative("blue") } let(:green) { alternative("green") } context "with an experiment" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"]) } it "should have a name" do expect(experiment.name).to eq("basket_text") end it "should have alternatives" do expect(experiment.alternatives.length).to be 2 end it "should have alternatives with correct names" do expect(experiment.alternatives.collect { |a| a.name }).to eq(["Basket", "Cart"]) end it "should be resettable by default" do expect(experiment.resettable).to be_truthy end it "should save to redis" do experiment.save expect(Split.redis.exists?("basket_text")).to be true end it "should save the start time to redis" do experiment_start_time = Time.at(1372167761) expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should not save the start time to redis when start_manually is enabled" do expect(Split.configuration).to receive(:start_manually).and_return(true) experiment.save expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should save the selected algorithm to redis" do experiment_algorithm = Split::Algorithms::Whiplash experiment.algorithm = experiment_algorithm experiment.save expect(Split::ExperimentCatalog.find("basket_text").algorithm).to eq(experiment_algorithm) end it "should handle having a start time stored as a string" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to 
receive(:now).twice.and_return(experiment_start_time) experiment.save Split.redis.hset(:experiment_start_times, experiment.name, experiment_start_time.to_s) expect(Split::ExperimentCatalog.find("basket_text").start_time).to eq(experiment_start_time) end it "should handle not having a start time" do experiment_start_time = Time.parse("Sat Mar 03 14:01:03") expect(Time).to receive(:now).and_return(experiment_start_time) experiment.save Split.redis.hdel(:experiment_start_times, experiment.name) expect(Split::ExperimentCatalog.find("basket_text").start_time).to be_nil end it "should not create duplicates when saving multiple times" do experiment.save experiment.save expect(Split.redis.exists?("basket_text")).to be true expect(Split.redis.lrange("basket_text", 0, -1)).to eq(['{"Basket":1}', '{"Cart":1}']) end describe "new record?" do it "should know if it hasn't been saved yet" do expect(experiment.new_record?).to be_truthy end it "should know if it has been saved yet" do experiment.save expect(experiment.new_record?).to be_falsey end end describe "control" do it "should be the first alternative" do experiment.save expect(experiment.control.name).to eq("Basket") end end end describe "initialization" do it "should set the algorithm when passed as an option to the initializer" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end it "should be possible to make an experiment not resettable" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) expect(experiment.resettable).to be_falsey end context "from configuration" do let(:experiment_name) { :my_experiment } let(:experiments) do { experiment_name => { alternatives: ["Control Opt", "Alt one"] } } end before { Split.configuration.experiments = experiments } it "assigns default values to the experiment" do 
expect(Split::Experiment.new(experiment_name).resettable).to eq(true) end end end describe "persistent configuration" do it "should persist resettable in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], resettable: false) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.resettable).to be_falsey end describe "#metadata" do let(:experiment) { Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash, metadata: meta) } let(:meta) { { a: "b" } } before do experiment.save end it "should delete the key when metadata is removed" do experiment.metadata = nil experiment.save expect(Split.redis.exists?(experiment.metadata_key)).to be_falsey end context "simple hash" do let(:meta) { { "basket" => "a", "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end context "nested hash" do let(:meta) { { "basket" => { "one" => "two" }, "cart" => "b" } } it "should persist metadata in redis" do e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.metadata).to eq(meta) end end end it "should persist algorithm in redis" do experiment = Split::Experiment.new("basket_text", alternatives: ["Basket", "Cart"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("basket_text") expect(e).to eq(experiment) expect(e.algorithm).to eq(Split::Algorithms::Whiplash) end it "should persist a new experiment in redis, that does not exist in the configuration file" do experiment = Split::Experiment.new("foobar", alternatives: ["tra", "la"], algorithm: Split::Algorithms::Whiplash) experiment.save e = Split::ExperimentCatalog.find("foobar") expect(e).to eq(experiment) expect(e.alternatives.collect { |a| a.name }).to eq(["tra", "la"]) end end describe "deleting" do it 
"should delete itself" do experiment = Split::Experiment.new("basket_text", alternatives: [ "Basket", "Cart"]) experiment.save experiment.delete expect(Split.redis.exists?("link_color")).to be false expect(Split::ExperimentCatalog.find("link_color")).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.delete expect(experiment.version).to eq(1) end it "should call the on_experiment_delete hook" do expect(Split.configuration.on_experiment_delete).to receive(:call) experiment.delete end end it "should use the specified algorithm if a winner does not exist" do Split.configuration.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color')) experiment.next_alternative.name.should eql('green') end end experiment.start experiment.delete expect(experiment.start_time).to be_nil end it "should default cohorting back to false" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq(true) experiment.delete expect(experiment.cohorting_disabled?).to eq(false) end end describe "winner" do it "should have no winner initially" do expect(experiment.winner).to be_nil end end describe "winner=" do it "should allow you to specify a winner" do experiment.save experiment.winner = "red" expect(experiment.winner.name).to eq("red") end it "should call the on_experiment_winner_choose hook" do expect(Split.configuration.on_experiment_winner_choose).to receive(:call) experiment.winner = "green" end context "when has_winner state is memoized" do before { expect(experiment).to_not have_winner } it "should keep has_winner state consistent" do experiment.winner = "red" expect(experiment).to have_winner end end end describe "reset_winner" do before { experiment.winner = "green" } it "should reset the winner" do experiment.reset_winner expect(experiment.winner).to be_nil end context "when has_winner state is memoized" do before { expect(experiment).to have_winner } it "should keep has_winner 
state consistent" do experiment.reset_winner expect(experiment).to_not have_winner end end end describe "has_winner?" do context "with winner" do before { experiment.winner = "red" } it "returns true" do expect(experiment).to have_winner end end context "without winner" do it "returns false" do expect(experiment).to_not have_winner end end it "memoizes has_winner state" do expect(experiment).to receive(:winner).once expect(experiment).to_not have_winner expect(experiment).to_not have_winner end end describe "reset" do let(:reset_manually) { false } before do allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) experiment.save green.increment_participation green.increment_participation end it "should reset all alternatives" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end it "should reset the winner" do experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation experiment.reset expect(experiment.winner).to be_nil end it "should increment the version" do expect(experiment.version).to eq(0) experiment.reset expect(experiment.version).to eq(1) end it "should call the on_experiment_reset hook" do expect(Split.configuration.on_experiment_reset).to receive(:call) experiment.reset end it "should call the on_before_experiment_reset hook" do expect(Split.configuration.on_before_experiment_reset).to receive(:call) experiment.reset end end describe "algorithm" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } it "should use the default algorithm if none is specified" do expect(experiment.algorithm).to eq(Split.configuration.algorithm) end it "should use the user specified algorithm for this experiment if specified" do experiment.algorithm = Split::Algorithms::Whiplash 
expect(experiment.algorithm).to eq(Split::Algorithms::Whiplash) end end describe "#next_alternative" do context "with multiple alternatives" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red", "green") } context "with winner" do it "should always return the winner" do green = Split::Alternative.new("green", "link_color") experiment.winner = "green" expect(experiment.next_alternative.name).to eq("green") green.increment_participation expect(experiment.next_alternative.name).to eq("green") end end context "without winner" do it "should use the specified algorithm" do experiment.algorithm = Split::Algorithms::Whiplash expect(experiment.algorithm).to receive(:choose_alternative).and_return(Split::Alternative.new("green", "link_color")) expect(experiment.next_alternative.name).to eq("green") end end end context "with single alternative" do let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue") } it "should always return the only alternative" do expect(experiment.next_alternative.name).to eq("blue") expect(experiment.next_alternative.name).to eq("blue") end end end describe "#cohorting_disabled?" 
do it "returns false when nothing has been configured" do expect(experiment.cohorting_disabled?).to eq false end it "returns true when enable_cohorting is performed" do experiment.enable_cohorting expect(experiment.cohorting_disabled?).to eq false end it "returns false when nothing has been configured" do experiment.disable_cohorting expect(experiment.cohorting_disabled?).to eq true end end describe "changing an existing experiment" do def same_but_different_alternative Split::ExperimentCatalog.find_or_create("link_color", "blue", "yellow", "orange") end it "should reset an experiment if it is loaded with different alternatives" do experiment.save blue.participant_count = 5 same_experiment = same_but_different_alternative expect(same_experiment.alternatives.map(&:name)).to eq(["blue", "yellow", "orange"]) expect(blue.participant_count).to eq(0) end it "should only reset once" do experiment.save expect(experiment.version).to eq(0) same_experiment = same_but_different_alternative expect(same_experiment.version).to eq(1) same_experiment_again = same_but_different_alternative expect(same_experiment_again.version).to eq(1) end context "when metadata is changed" do it "should increase version" do experiment.save experiment.metadata = { "foo" => "bar" } expect { experiment.save }.to change { experiment.version }.by(1) end it "does not increase version" do experiment.metadata = nil experiment.save expect { experiment.save }.to change { experiment.version }.by(0) end end context "when experiment configuration is changed" do let(:reset_manually) { false } before do experiment.save allow(Split.configuration).to receive(:reset_manually).and_return(reset_manually) green.increment_participation green.increment_participation experiment.set_alternatives_and_options(alternatives: %w(blue red green zip), goals: %w(purchase)) experiment.save end it "resets all alternatives" do expect(green.participant_count).to eq(0) expect(green.completed_count).to eq(0) end context "when 
reset_manually is set" do let(:reset_manually) { true } it "does not reset alternatives" do expect(green.participant_count).to eq(2) expect(green.completed_count).to eq(0) end end end end describe "alternatives passed as non-strings" do it "should throw an exception if an alternative is passed that is not a string" do expect { Split::ExperimentCatalog.find_or_create("link_color", :blue, :red) }.to raise_error(ArgumentError) expect { Split::ExperimentCatalog.find_or_create("link_enabled", true, false) }.to raise_error(ArgumentError) end end describe "specifying weights" do let(:experiment_with_weight) { Split::ExperimentCatalog.find_or_create("link_color", { "blue" => 1 }, { "red" => 2 }) } it "should work for a new experiment" do expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end it "should work for an existing experiment" do experiment.save expect(experiment_with_weight.alternatives.map(&:weight)).to eq([1, 2]) end end describe "specifying goals" do let(:experiment) { new_experiment(["purchase"]) } context "saving experiment" do let(:same_but_different_goals) { Split::ExperimentCatalog.find_or_create({ "link_color" => ["purchase", "refund"] }, "blue", "red", "green") } before { experiment.save } it "can find existing experiment" do expect(Split::ExperimentCatalog.find("link_color").name).to eq("link_color") end it "should reset an experiment if it is loaded with different goals" do same_but_different_goals expect(Split::ExperimentCatalog.find("link_color").goals).to eq(["purchase", "refund"]) end end it "should have goals" do expect(experiment.goals).to eq(["purchase"]) end context "find or create experiment" do it "should have correct goals" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.goals).to eq(["purchase", "refund"]) experiment = Split::ExperimentCatalog.find_or_create("link_color3", "blue", "red", "green") expect(experiment.goals).to 
eq([]) end end end describe "beta probability calculation" do it "should return a hash with the probability of each alternative being the best" do experiment = Split::ExperimentCatalog.find_or_create("mathematicians", "bernoulli", "poisson", "lagrange") experiment.calc_winning_alternatives expect(experiment.alternative_probabilities).not_to be_nil end it "should return between 46% and 54% probability for an experiment with 2 alternatives and no data" do experiment = Split::ExperimentCatalog.find_or_create("scientists", "einstein", "bohr") experiment.calc_winning_alternatives expect(experiment.alternatives[0].p_winner).to be_within(0.04).of(0.50) end it "should calculate the probability of being the winning alternative separately for each goal", skip: true do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") goal1 = experiment.goals[0] goal2 = experiment.goals[1] experiment.alternatives.each do |alternative| alternative.participant_count = 50 alternative.set_completed_count(10, goal1) alternative.set_completed_count(15+rand(30), goal2) end experiment.calc_winning_alternatives alt = experiment.alternatives[0] p_goal1 = alt.p_winner(goal1) p_goal2 = alt.p_winner(goal2) expect(p_goal1).not_to be_within(0.04).of(p_goal2) end it "should return nil and not re-calculate probabilities if they have already been calculated today" do experiment = Split::ExperimentCatalog.find_or_create({ "link_color3" => ["purchase", "refund"] }, "blue", "red", "green") expect(experiment.calc_winning_alternatives).not_to be nil expect(experiment.calc_winning_alternatives).to be nil end end end <MSG> Akriti/Sumedha - Now random_alternative method in experiment class uses the specified algorithm and not the default one. 
<DFF> @@ -225,7 +225,8 @@ describe Split::Experiment do end it "should use the specified algorithm if a winner does not exist" do - Split.configuration.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color')) + experiment.algorithm = Split::Algorithms::Whiplash + experiment.algorithm.should_receive(:choose_alternative).and_return(Split::Alternative.new('green', 'link_color')) experiment.next_alternative.name.should eql('green') end end
2
Akriti/Sumedha - Now random_alternative method in experiment class uses the specified algorithm and not the default one.
1
.rb
rb
mit
splitrb/split
10070825
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count 
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to 
eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } 
expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) 
count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", 
"two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already 
finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before 
do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) 
ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, 
} should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") 
expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", 
"link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = 
Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it 
"should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = 
Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end it "should increment the counter for the specified-goal completed alternative" do @previous_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) @previous_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) finished({"link_color" => "purchase"}) new_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) new_completion_count_for_goal1.should eql(@previous_completion_count_for_goal1 + 1) new_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) new_completion_count_for_goal2.should eql(@previous_completion_count_for_goal2) end end end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| 
expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with 
preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test 
:my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", 
"red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Simplify goals helper spec <DFF> @@ -900,13 +900,15 @@ describe Split::Helper do end it "should increment the counter for the specified-goal completed alternative" do - @previous_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) - @previous_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) - finished({"link_color" => "purchase"}) - new_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) - new_completion_count_for_goal1.should eql(@previous_completion_count_for_goal1 + 1) - new_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, 
@experiment_name).completed_count(@goal2) - new_completion_count_for_goal2.should eql(@previous_completion_count_for_goal2) + lambda { + lambda { + finished({"link_color" => ["purchase"]}) + }.should_not change { + Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) + } + }.should change { + Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) + }.by(1) end end end
9
Simplify goals helper spec
7
.rb
rb
mit
splitrb/split
10070826
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count 
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to 
eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } 
expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) 
count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", 
"two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already 
finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before 
do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) 
ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, 
} should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") 
expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", 
"link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = 
Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it 
"should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = 
Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end it "should increment the counter for the specified-goal completed alternative" do @previous_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) @previous_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) finished({"link_color" => "purchase"}) new_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) new_completion_count_for_goal1.should eql(@previous_completion_count_for_goal1 + 1) new_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) new_completion_count_for_goal2.should eql(@previous_completion_count_for_goal2) end end end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| 
expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with 
preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test 
:my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", 
"red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Simplify goals helper spec <DFF> @@ -900,13 +900,15 @@ describe Split::Helper do end it "should increment the counter for the specified-goal completed alternative" do - @previous_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) - @previous_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) - finished({"link_color" => "purchase"}) - new_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) - new_completion_count_for_goal1.should eql(@previous_completion_count_for_goal1 + 1) - new_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, 
@experiment_name).completed_count(@goal2) - new_completion_count_for_goal2.should eql(@previous_completion_count_for_goal2) + lambda { + lambda { + finished({"link_color" => ["purchase"]}) + }.should_not change { + Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) + } + }.should change { + Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) + }.by(1) end end end
9
Simplify goals helper spec
7
.rb
rb
mit
splitrb/split
10070827
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count 
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to 
eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } 
expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) 
count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", 
"two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already 
finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) end it "should not call the method without alternative" do ab_user[@experiment.key] = nil expect(self).not_to receive(:some_method) ab_finished(@experiment_name) end end end context "for an experiment that the user is excluded from" do before 
do alternative = ab_test("link_color", "blue", "red") expect(Split::Alternative.new(alternative, "link_color").participant_count).to eq(1) alternative = ab_test("button_size", "small", "big") expect(Split::Alternative.new(alternative, "button_size").participant_count).to eq(0) end it "should not increment the completed counter" do # So, user should be participating in the link_color experiment and # receive the control for button_size. As the user is not participating in # the button size experiment, finishing it should not increase the # completion count for that alternative. expect { ab_finished("button_size") }.not_to change { Split::Alternative.new("small", "button_size").completed_count } end end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", "alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) 
ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, 
} should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", "5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") 
expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", 
"link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = 
Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it 
"should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = 
Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end it "should increment the counter for the specified-goal completed alternative" do @previous_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) @previous_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) finished({"link_color" => "purchase"}) new_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) new_completion_count_for_goal1.should eql(@previous_completion_count_for_goal1 + 1) new_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) new_completion_count_for_goal2.should eql(@previous_completion_count_for_goal2) end end end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| 
expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with 
preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test 
:my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| [a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", 
"red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Simplify goals helper spec <DFF> @@ -900,13 +900,15 @@ describe Split::Helper do end it "should increment the counter for the specified-goal completed alternative" do - @previous_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) - @previous_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) - finished({"link_color" => "purchase"}) - new_completion_count_for_goal1 = Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) - new_completion_count_for_goal1.should eql(@previous_completion_count_for_goal1 + 1) - new_completion_count_for_goal2 = Split::Alternative.new(@alternative_name, 
@experiment_name).completed_count(@goal2) - new_completion_count_for_goal2.should eql(@previous_completion_count_for_goal2) + lambda { + lambda { + finished({"link_color" => ["purchase"]}) + }.should_not change { + Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) + } + }.should change { + Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) + }.by(1) end end end
9
Simplify goals helper spec
7
.rb
rb
mit
splitrb/split
10070828
<NME> CHANGELOG.md <BEF> ## 3.4.1 (November 12th, 2019) Bugfixes: - Bump minimum required redis to 4.2 (@andrehjr, #628) - Removed repeated loading from config (@robin-phung, #619) - Simplify RedisInterface usage when persisting Experiment alternatives (@andrehjr, #632) - Remove redis_url impl. Deprecated on version 2.2 (@andrehjr, #631) - Remove thread_safe config as redis-rb is thread_safe by default (@andrehjr, #630) - Fix typo of in `Split::Trial` class variable (TomasBarry, #644) - Single HSET to update values, instead of multiple ones (@andrehjr, #640) - Use Redis#hmset to keep compatibility with Redis < 4.0 (@andrehjr, #659) - Remove 'set' parsing for alternatives. Sets were used as storage and deprecated on 0.x (@andrehjr, #639) - Adding documentation related to what is stored on cookies. (@andrehjr, #634) - Keep railtie defined under the Split gem namespace (@avit, #666) - Update RSpec helper to support block syntax (@clowder, #665) ## 3.4.1 (November 12th, 2019) Bugfixes: - Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602) ## 3.4.0 (November 9th, 2019) Features: - Improve DualAdapter (@santib, #588), adds a new configuration for the DualAdapter, making it possible to keep consistency for logged_out/logged_in users. It's a opt-in flag. No Behavior was changed on this release. 
- Make dashboard pagination default "per" param configurable (@alopatin, #597) Bugfixes: - Fix `force_alternative` for experiments with incremented version (@giraffate, #568) - Persist alternative weights (@giraffate, #570) - Combined experiment performance improvements (@gnanou, #575) - Handle correctly case when ab_finished is called before ab_test for a user (@gnanou, #577) - When loading active_experiments, it should not look into user's 'finished' keys (@andrehjr, #582) Misc: - Remove `rubyforge_project` from gemspec (@giraffate, #583) - Fix URLs to replace http with https (@giraffate , #584) - Lazily include split helpers in ActionController::Base (@hasghari, #586) - Fix unused variable warnings (@andrehjr, #592) - Fix ruby warnings (@andrehjr, #593) - Update rubocop.yml config (@andrehjr, #594) - Add frozen_string_literal to all files that were missing it (@andrehjr, #595) ## 3.3.2 (April 12th, 2019) Features: - Added uptime robot to configuration.rb (@razel1982, #556) - Check to see if being run in Rails application and run in before_initialize (@husteadrobert, #555) Bugfixes: - Fix error message interpolation (@hanibash, #553) - Fix Bigdecimal warnings (@agraves, #551) - Avoid hitting up on redis for robots/excluded users. (@andrehjr, #544) - Checks for defined?(request) on Helper#exclude_visitor?. 
(@andrehjr) Misc: - Update travis to add Rails 6 (@edmilton, #559) - Fix broken specs in developement environment (@dougpetronilio, #557) ## 3.3.1 (January 11th, 2019) Features: - Filter some more bots (@janosch-x, #542) Bugfixes: - Fix Dashboard Pagination Helper typo (@cattekin, #541) - Do not storage alternative in cookie if experiment has a winner (@sadhu89, #539) - fix user participating alternative not found (@NaturalHokke, #536) Misc: - Tweak RSpec instructions (@eliotsykes, #540) - Improve README regarding rspec usage (@vermaxik, #538) ## 3.3.0 (August 13th, 2018) Features: - Added pagination for dashboard (@GeorgeGorbanev, #518) - Add Facebot crawler to list of bots (@pfeiffer, #530) - Ignore previewing requests (@pfeiffer, #531) - Fix binding of ignore_filter (@pfeiffer, #533) Bugfixes: - Fix cookie header duplication (@andrehjr, #522) Performance: - Improve performance of RedisInterface#make_list_length by using LTRIM command (@mlovic, #509) Misc: - Update development dependencies - test rails 5.2 on travis (@lostapathy, #524) - update ruby versions for travis (@lostapathy, #525) ## 3.2.0 (September 21st, 2017) Features: - Allow configuration of how often winning alternatives are recalculated (@patbl, #501) Bugfixes: - Avoid z_score numeric exception for conversion rates >1 (@cmantas, #503) - Fix combined experiments (@semanticart, #502) ## 3.1.1 (August 30th, 2017) Bugfixes: - Bring back support for ruby 1.9.3 and greater (rubygems 2.0.0 or greater now required) (@patbl, #498) Misc: - Document testing with RSpec (@eliotsykes, #495) ## 3.1.0 (August 14th, 2017) Features: - Support for combined experiments (@daviddening, #493) - Rewrite CookieAdapter to work with Rack::Request and Rack::Response directly (@andrehjr, #490) - Enumeration of a User's Experiments that Respects the db_failover Option(@MarkRoddy, #487) Bugfixes: - Blocked a few more common bot user agents (@kylerippey, #485) Misc: - Repository Audit by Maintainer.io (@RichardLitt, #484) - 
Update development dependencies - Test on ruby 2.4.1 - Test compatibility with rails 5.1 - Add uris to metadata section in gemspec ## 3.0.0 (March 30th, 2017) Features: - added block randomization algorithm and specs (@hulleywood, #475) - Add ab_record_extra_info to allow record extra info to alternative and display on dashboard. (@tranngocsam, #460) Bugfixes: - Avoid crashing on Ruby 2.4 for numeric strings (@flori, #470) - Fix issue where redis isn't required (@tomciopp , #466) Misc: - Avoid variable_size_secure_compare private method (@eliotsykes, #465) ## 2.2.0 (November 11th, 2016) **Backwards incompatible!** Redis keys are renamed. Please make sure all running tests are completed before you upgrade, as they will reset. Features: - Remove dependency on Redis::Namespace (@bschaeffer, #425) - Make resetting on experiment change optional (@moggyboy, #430) - Add ability to force alternative on dashboard (@ccallebs, #437) Bugfixes: - Fix variations reset across page loads for multiple=control and improve coverage (@Vasfed, #432) Misc: - Remove Explicit Return (@BradHudson, #441) - Update Redis config docs (@bschaeffer, #422) - Harden HTTP Basic snippet against timing attacks (@eliotsykes, #443) - Removed a couple old ruby 1.8 hacks (@andrew, #456) - Run tests on rails 5 (@andrew, #457) - Fixed a few codeclimate warnings (@andrew, #458) - Use codeclimate for test coverage (@andrew #455) ## 2.1.0 (August 8th, 2016) Features: - Support REDIS_PROVIDER variable used in Heroku (@kartikluke, #426) ## 2.0.0 (July 17th, 2016) Breaking changes: - Removed deprecated `finished` and `begin_experiment` methods - Namespaced override param to avoid potential clashes (@henrik, #398) ## 1.7.0 (June 28th, 2016) Features: - Running concurrent experiments on same endpoint/view (@karmakaze, #421) ## 1.6.0 (June 16th, 2016) Features: - Add Dual Redis(logged-in)/cookie(logged-out) persistence adapter (@karmakaze, #420) ## 1.5.0 (June 8th, 2016) Features: - Add `expire_seconds:` TTL option 
to RedisAdapter (@karmakaze, #409) - Optional custom persistence adapter (@ndelage, #411) Misc: - Use fakeredis for testing (@andrew, #412) ## 1.4.5 (June 7th, 2016) Bugfixes: - FIX Negative numbers on non-finished (@divineforest, #408) - Eliminate extra RedisAdapter hget (@karmakaze, #407) - Remove unecessary code from Experiment class (@pakallis, #391, #392, #393) Misc: - Simplify Configuration#normalized_experiments (@pakallis, #395) - Clarify test running instructions (@henrik, #397) ## 1.4.4 (May 9th, 2016) Bugfixes: - Increment participation if store override is true and no experiment key exists (@spheric, #380) Misc: - Deprecated `finished` method in favour of `ab_finished` (@andreibondarev, #389) - Added minimum version requirement to simple-random - Clarify finished with first option being a hash in Readme (@henrik, #382) - Refactoring the User abstraction (@andreibondarev, #384) ## 1.4.3 (April 28th, 2016) Features: - add on_trial callback whenever a trial is started (@mtyeh411, #375) Bugfixes: - Allow algorithm configuration at experiment level (@007sumit, #376) Misc: - only choose override if it exists as valid alternative (@spheric, #377) ## 1.4.2 (April 25th, 2016) Misc: - Deprecated some legacy methods (@andreibondarev, #374) ## 1.4.1 (April 21st, 2016) Bugfixes: - respect manual start configuration after an experiment has been deleted (@mtyeh411, #372) Misc: - Introduce goals collection to reduce complexity of Experiment#save (@pakallis, #365) - Revise specs according to http://betterspecs.org/ (@hkliya, #369) ## 1.4.0 (April 2nd, 2016) Features: - Added experiment filters to dashboard (@ccallebs, #363, #364) - Added Contributor Covenant Code of Conduct ## 1.3.2 (January 2nd, 2016) Bugfixes: - Fix deleting experiments in from the updated dashboard (@craigmcnamara, #352) ## 1.3.1 (January 1st, 2016) Bugfixes: - Fix the dashboard for experiments with ‘/‘ in the name. 
(@craigmcnamara, #349) ## 1.3.0 (October 20th, 2015) Features: - allow for custom redis_url different from ENV variable (@davidgrieser, #323) - add ability to change the length of the persistence cookie (@peterylai, #335) Bugfixes: - Rescue from Redis::BaseError instead of Redis::CannotConnectError (@nfm, #342) - Fix active experiments when experiment is on a later version (@ndrisso, #331) - Fix caching of winning alternative (@nfm, #329) Misc: - Remove duplication from Experiment#save (@pakallis, #333) - Remove unnecessary argument from Experiment#write_to_alternative (@t4deu, #332) ## 1.2.1 (May 17th, 2015) Features: - Handle redis DNS resolution failures gracefully (@fusion2004, #310) - Push metadata to ab_test block (@ekorneeff, #296) - Helper methods are now private when included in controllers (@ipoval, #303) Bugfixes: - Return an empty hash as metadata when Split is disabled (@tomasdundacek, #313) - Don't use capture helper from ActionView (@tomasdundacek, #312) Misc: - Remove body "max-width" from dashboard (@xicreative, #299) - fix private for class methods (@ipoval, #301) - minor memoization fix in spec (@ipoval, #304) - Minor documentation fixes (#295, #297, #305, #308) ## 1.2.0 (January 24th, 2015) Features: - Configure redis using environment variables if available (@saratovsource , #293) - Store metadata on experiment configuration (@dekz, #291) Bugfixes: - Revert the Trial#complete! public API to support noargs (@dekz, #292) ## 1.1.0 (January 9th, 2015) Changes: - Public class methods on `Split::Experiment` (e.g., `find_or_create`) have been moved to `Split::ExperimentCatalog`. 
Features: - Decouple trial from Split::Helper (@joshdover, #286) - Helper method for Active Experiments (@blahblahblah-, #273) Misc: - Use the new travis container based infrastructure for tests (@andrew, #280) ## 1.0.0 (October 12th, 2014) Changes: - Remove support for Ruby 1.8.7 and Rails 2.3 (@qpowell, #271) ## 0.8.0 (September 25th, 2014) Features: - Added new way to calculate the probability an alternative is the winner (@caser, #266, #251) - support multiple metrics per experiment (@stevenou, #260) Bugfixes: - Avoiding call to params in EncapsulatedHelper (@afn, #257) ## 0.7.3 (September 16th, 2014) Features: - Disable all split tests via a URL parameter (@hwartig, #263) Bugfixes: - Correctly escape experiment names on dashboard (@ecaron, #265) - Handle redis connection exception error properly (@andrew, #245) ## 0.7.2 (June 12th, 2014) Features: - Show metrics on the dashboard (@swrobel, #241) Bugfixes: - Avoid nil error with ExperimentCatalog when upgrading (@danielschwartz, #253) - [SECURITY ISSUE] Only allow known alternatives as query param overrides (@ankane, #255) ## 0.7.1 (March 20th, 2014) Features: - You can now reopen experiment from the dashboard (@mikezaby, #235) Misc: - Internal code tidy up (@IanVaughan, #238) ## 0.7.0 (December 26th, 2013) Features: - Significantly improved z-score algorithm (@caser ,#221) - Better sorting of Experiments on dashboard (@wadako111, #218) Bugfixes: - Fixed start button not being displayed in some cases (@vigosan, #219) Misc: - Experiment#initialize refactoring (@nberger, #224) - Extract ExperimentStore into a seperate class (@nberger, #225) ## 0.6.6 (October 15th, 2013) Features: - Sort experiments on Dashboard so "active" ones without a winner appear first (@swrobel, #204) - Starting tests manually (@duksis, #209) Bugfixes: - Only trigger completion callback with valid Trial (@segfaultAX, #208) - Fixed bug with `resettable` when using `normalize_experiments` (@jonashuckestein, #213) Misc: - Added more bots to 
filter list (@lbeder, #214, #215, #216) ## 0.6.5 (August 23, 2013) Features: - Added Redis adapter for persisting experiments across sessions (@fengb, #203) Misc: - Expand upon algorithms section in README (@swrobel, #200) ## 0.6.4 (August 8, 2013) Features: - Add hooks for experiment deletion and resetting (@craigmcnamara, #198) - Allow Split::Helper to be used outside of a controller (@nfm, #190) - Show current Rails/Rack Env in dashboard (@rceee, #187) Bugfixes: - Fix whiplash algorithm when using goals (@swrobel, #193) Misc: - Refactor dashboard js (@buddhamagnet) ## 0.6.3 (July 8, 2013) Features: - Add hooks for Trial#choose! and Trial#complete! (@bmarini, #176) Bugfixes: - Stores and parses Experiment's start_time as a UNIX integer (@joeroot, #177) ## 0.6.2 (June 6, 2013) Features: - Rails 2.3 compatibility (@bhcarpenter, #167) - Adding possibility to store overridden alternative (@duksis, #173) Misc: - Now testing against multiple versions of rails ## 0.6.1 (May 4, 2013) Bugfixes: - Use the specified algorithm for the experiment instead of the default (@woodhull, #165) Misc: - Ensure experiements are valid when configuring (@ashmckenzie, #159) - Allow arrays to be passed to ab_test (@fenelon, #156) ## 0.6.0 (April 4, 2013) Features: - Support for Ruby 2.0.0 (@phoet, #142) - Multiple Goals (@liujin, #109) - Ignoring IPs using Regular Expressions (@waynemoore, #119) - Added ability to add more bots to the default list (@themgt, #140) - Allow custom configuration of user blocking logic (@phoet , #148) Bugfixes: - Fixed regression in handling of config files (@iangreenleaf, #115) - Fixed completion rate increases for experiments users aren't participating in (@philnash, #67) - Handle exceptions from invalid JSON in cookies (@iangreenleaf, #126) Misc: - updated minimum json version requirement - Refactor Yaml Configuration (@rtwomey, #124) - Refactoring of Experiments (@iangreenleaf @tamird, #117 #118) - Added more known Bots, including Pingdom, Bing, YandexBot 
(@julesie, @zinkkrysty, @dimko) - Improved Readme (@iangreenleaf @phoet) ## 0.5.0 (January 28, 2013) Features: - Persistence Adapters: Cookies and Session (@patbenatar, #98) - Configure experiments from a hash (@iangreenleaf, #97) - Pluggable sampling algorithms (@woodhull, #105) Bugfixes: - Fixed negative number of non-finished rates (@philnash, #83) - Fixed behaviour of finished(:reset => false) (@philnash, #88) - Only take into consideration positive z-scores (@thomasmaas, #96) - Amended ab_test method to raise ArgumentError if passed integers or symbols as alternatives (@buddhamagnet, #81) ## 0.4.6 (October 28, 2012) Features: - General code quality improvements (@buddhamagnet, #79) Bugfixes: - Don't increment the experiment counter if user has finished (@dimko, #78) - Fixed an incorrect test (@jaywengrow, #74) ## 0.4.5 (August 30, 2012) Bugfixes: - Fixed header gradient in FF/Opera (@philnash, #69) - Fixed reseting of experiment in session (@apsoto, #43) ## 0.4.4 (August 9, 2012) Features: - Allow parameter overrides, even without Redis. (@bhcarpenter, #62) Bugfixes: - Fixes version number always increasing when alternatives are changed (@philnash, #63) - updated guard-rspec to version 1.2 ## 0.4.3 (July 8, 2012) Features: - redis failover now recovers from all redis-related exceptions ## 0.4.2 (June 1, 2012) Features: - Now works with v3.0 of redis gem Bugfixes: - Fixed redis failover on Rubinius ## 0.4.1 (April 6, 2012) Features: - Added configuration option to disable Split testing (@ilyakatz, #45) Bugfixes: - Fix weights for existing experiments (@andreas, #40) - Fixed dashboard range error (@andrew, #42) ## 0.4.0 (March 7, 2012) **IMPORTANT** If using ruby 1.8.x and weighted alternatives you should always pass the control alternative through as the second argument with any other alternatives as a third argument because the order of the hash is not preserved in ruby 1.8, ruby 1.9 users are not affected by this bug. 
Features: - Experiments now record when they were started (@vrish88, #35) - Old versions of experiments in sessions are now cleaned up - Avoid users participating in multiple experiments at once (#21) Bugfixes: - Overriding alternatives doesn't work for weighted alternatives (@layflags, #34) - confidence_level helper should handle tiny z-scores (#23) ## 0.3.3 (February 16, 2012) Bugfixes: - Fixed redis failover when a block was passed to ab_test (@layflags, #33) ## 0.3.2 (February 12, 2012) Features: - Handle redis errors gracefully (@layflags, #32) ## 0.3.1 (November 19, 2011) Features: - General code tidy up (@ryanlecompte, #22, @mocoso, #28) - Lazy loading data from Redis (@lautis, #25) Bugfixes: - Handle unstarted experiments (@mocoso, #27) - Relaxed Sinatra version requirement (@martinclu, #24) ## 0.3.0 (October 9, 2011) Features: - Redesigned dashboard (@mrappleton, #17) - Use atomic increments in redis for better concurrency (@lautis, #18) - Weighted alternatives Bugfixes: - Fix to allow overriding of experiments that aren't on version 1 ## 0.2.4 (July 18, 2011) Features: - Added option to finished to not reset the users session Bugfixes: - Only allow strings as alternatives, fixes strange errors when passing true/false or symbols ## 0.2.3 (June 26, 2011) Features: - Experiments can now be deleted from the dashboard - ab_test helper now accepts a block - Improved dashboard Bugfixes: - After resetting an experiment, existing users of that experiment will also be reset ## 0.2.2 (June 11, 2011) Features: - Updated redis-namespace requirement to 1.0.3 - Added a configuration object for changing options - Robot regex can now be changed via a configuration options - Added ability to ignore visits from specified IP addresses - Dashboard now shows percentage improvement of alternatives compared to the control - If the alternatives of an experiment are changed it resets the experiment and uses the new alternatives Bugfixes: - Saving an experiment multiple times no 
longer creates duplicate alternatives ## 0.2.1 (May 29, 2011) Bugfixes: - Convert legacy sets to lists to avoid exceptions during upgrades from 0.1.x ## 0.2.0 (May 29, 2011) Features: - Override an alternative via a url parameter - Experiments can now be reset from the dashboard - The first alternative is now considered the control - General dashboard usability improvements - Robots are ignored and given the control alternative Bugfixes: - Alternatives are now store in a list rather than a set to ensure consistent ordering - Fixed diving by zero errors ## 0.1.1 (May 18, 2011) Bugfixes: - More Robust conversion rate display on dashboard - Ensure `Split::Version` is available everywhere, fixed dashboard ## 0.1.0 (May 17, 2011) Initial Release <MSG> Update CHANGELOG.md <DFF> @@ -1,3 +1,23 @@ +## Unreleased 4.0.0 + +Bugfixes: +- ab_test must return metadata on error or if split is disabled/excluded user (@andrehjr, #622) +- Fix versioned experiments when used with allow_multiple_experiments=control (@andrehjr, #613) +- Only block Pinterest bot (@huoxito, #606) +- Respect experiment defaults when loading experiments in initializer. (@mattwd7, #599) + +Features: +- Make goals accessible via on_trial_complete callbacks (@robin-phung, #625) +- Replace usage of SimpleRandom with RubyStats(Used for Beta Distribution RNG) (@andrehjr, #616) +- Introduce enable/disable experiment cohorting (@robin-phung, #615) +- Add on_experiment_winner_choose callback (@GenaMinenkov, #574) + +Misc: +- Drop support for Ruby < 2.5 (@andrehjr, #627) +- Drop support for Rails < 5 (@andrehkr, #607) +- Bump minimum required redis to 4.2 (@andrehjr, #628) +- Removed repeated loading from config (@robin-phung, #619) + ## 3.4.1 (November 12th, 2019) Bugfixes:
20
Update CHANGELOG.md
0
.md
md
mit
splitrb/split
10070829
<NME> CHANGELOG.md <BEF> ## 3.4.1 (November 12th, 2019) Bugfixes: - Bump minimum required redis to 4.2 (@andrehjr, #628) - Removed repeated loading from config (@robin-phung, #619) - Simplify RedisInterface usage when persisting Experiment alternatives (@andrehjr, #632) - Remove redis_url impl. Deprecated on version 2.2 (@andrehjr, #631) - Remove thread_safe config as redis-rb is thread_safe by default (@andrehjr, #630) - Fix typo of in `Split::Trial` class variable (TomasBarry, #644) - Single HSET to update values, instead of multiple ones (@andrehjr, #640) - Use Redis#hmset to keep compatibility with Redis < 4.0 (@andrehjr, #659) - Remove 'set' parsing for alternatives. Sets were used as storage and deprecated on 0.x (@andrehjr, #639) - Adding documentation related to what is stored on cookies. (@andrehjr, #634) - Keep railtie defined under the Split gem namespace (@avit, #666) - Update RSpec helper to support block syntax (@clowder, #665) ## 3.4.1 (November 12th, 2019) Bugfixes: - Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602) ## 3.4.0 (November 9th, 2019) Features: - Improve DualAdapter (@santib, #588), adds a new configuration for the DualAdapter, making it possible to keep consistency for logged_out/logged_in users. It's a opt-in flag. No Behavior was changed on this release. 
- Make dashboard pagination default "per" param configurable (@alopatin, #597) Bugfixes: - Fix `force_alternative` for experiments with incremented version (@giraffate, #568) - Persist alternative weights (@giraffate, #570) - Combined experiment performance improvements (@gnanou, #575) - Handle correctly case when ab_finished is called before ab_test for a user (@gnanou, #577) - When loading active_experiments, it should not look into user's 'finished' keys (@andrehjr, #582) Misc: - Remove `rubyforge_project` from gemspec (@giraffate, #583) - Fix URLs to replace http with https (@giraffate , #584) - Lazily include split helpers in ActionController::Base (@hasghari, #586) - Fix unused variable warnings (@andrehjr, #592) - Fix ruby warnings (@andrehjr, #593) - Update rubocop.yml config (@andrehjr, #594) - Add frozen_string_literal to all files that were missing it (@andrehjr, #595) ## 3.3.2 (April 12th, 2019) Features: - Added uptime robot to configuration.rb (@razel1982, #556) - Check to see if being run in Rails application and run in before_initialize (@husteadrobert, #555) Bugfixes: - Fix error message interpolation (@hanibash, #553) - Fix Bigdecimal warnings (@agraves, #551) - Avoid hitting up on redis for robots/excluded users. (@andrehjr, #544) - Checks for defined?(request) on Helper#exclude_visitor?. 
(@andrehjr) Misc: - Update travis to add Rails 6 (@edmilton, #559) - Fix broken specs in developement environment (@dougpetronilio, #557) ## 3.3.1 (January 11th, 2019) Features: - Filter some more bots (@janosch-x, #542) Bugfixes: - Fix Dashboard Pagination Helper typo (@cattekin, #541) - Do not storage alternative in cookie if experiment has a winner (@sadhu89, #539) - fix user participating alternative not found (@NaturalHokke, #536) Misc: - Tweak RSpec instructions (@eliotsykes, #540) - Improve README regarding rspec usage (@vermaxik, #538) ## 3.3.0 (August 13th, 2018) Features: - Added pagination for dashboard (@GeorgeGorbanev, #518) - Add Facebot crawler to list of bots (@pfeiffer, #530) - Ignore previewing requests (@pfeiffer, #531) - Fix binding of ignore_filter (@pfeiffer, #533) Bugfixes: - Fix cookie header duplication (@andrehjr, #522) Performance: - Improve performance of RedisInterface#make_list_length by using LTRIM command (@mlovic, #509) Misc: - Update development dependencies - test rails 5.2 on travis (@lostapathy, #524) - update ruby versions for travis (@lostapathy, #525) ## 3.2.0 (September 21st, 2017) Features: - Allow configuration of how often winning alternatives are recalculated (@patbl, #501) Bugfixes: - Avoid z_score numeric exception for conversion rates >1 (@cmantas, #503) - Fix combined experiments (@semanticart, #502) ## 3.1.1 (August 30th, 2017) Bugfixes: - Bring back support for ruby 1.9.3 and greater (rubygems 2.0.0 or greater now required) (@patbl, #498) Misc: - Document testing with RSpec (@eliotsykes, #495) ## 3.1.0 (August 14th, 2017) Features: - Support for combined experiments (@daviddening, #493) - Rewrite CookieAdapter to work with Rack::Request and Rack::Response directly (@andrehjr, #490) - Enumeration of a User's Experiments that Respects the db_failover Option(@MarkRoddy, #487) Bugfixes: - Blocked a few more common bot user agents (@kylerippey, #485) Misc: - Repository Audit by Maintainer.io (@RichardLitt, #484) - 
Update development dependencies - Test on ruby 2.4.1 - Test compatibility with rails 5.1 - Add uris to metadata section in gemspec ## 3.0.0 (March 30th, 2017) Features: - added block randomization algorithm and specs (@hulleywood, #475) - Add ab_record_extra_info to allow record extra info to alternative and display on dashboard. (@tranngocsam, #460) Bugfixes: - Avoid crashing on Ruby 2.4 for numeric strings (@flori, #470) - Fix issue where redis isn't required (@tomciopp , #466) Misc: - Avoid variable_size_secure_compare private method (@eliotsykes, #465) ## 2.2.0 (November 11th, 2016) **Backwards incompatible!** Redis keys are renamed. Please make sure all running tests are completed before you upgrade, as they will reset. Features: - Remove dependency on Redis::Namespace (@bschaeffer, #425) - Make resetting on experiment change optional (@moggyboy, #430) - Add ability to force alternative on dashboard (@ccallebs, #437) Bugfixes: - Fix variations reset across page loads for multiple=control and improve coverage (@Vasfed, #432) Misc: - Remove Explicit Return (@BradHudson, #441) - Update Redis config docs (@bschaeffer, #422) - Harden HTTP Basic snippet against timing attacks (@eliotsykes, #443) - Removed a couple old ruby 1.8 hacks (@andrew, #456) - Run tests on rails 5 (@andrew, #457) - Fixed a few codeclimate warnings (@andrew, #458) - Use codeclimate for test coverage (@andrew #455) ## 2.1.0 (August 8th, 2016) Features: - Support REDIS_PROVIDER variable used in Heroku (@kartikluke, #426) ## 2.0.0 (July 17th, 2016) Breaking changes: - Removed deprecated `finished` and `begin_experiment` methods - Namespaced override param to avoid potential clashes (@henrik, #398) ## 1.7.0 (June 28th, 2016) Features: - Running concurrent experiments on same endpoint/view (@karmakaze, #421) ## 1.6.0 (June 16th, 2016) Features: - Add Dual Redis(logged-in)/cookie(logged-out) persistence adapter (@karmakaze, #420) ## 1.5.0 (June 8th, 2016) Features: - Add `expire_seconds:` TTL option 
to RedisAdapter (@karmakaze, #409) - Optional custom persistence adapter (@ndelage, #411) Misc: - Use fakeredis for testing (@andrew, #412) ## 1.4.5 (June 7th, 2016) Bugfixes: - FIX Negative numbers on non-finished (@divineforest, #408) - Eliminate extra RedisAdapter hget (@karmakaze, #407) - Remove unecessary code from Experiment class (@pakallis, #391, #392, #393) Misc: - Simplify Configuration#normalized_experiments (@pakallis, #395) - Clarify test running instructions (@henrik, #397) ## 1.4.4 (May 9th, 2016) Bugfixes: - Increment participation if store override is true and no experiment key exists (@spheric, #380) Misc: - Deprecated `finished` method in favour of `ab_finished` (@andreibondarev, #389) - Added minimum version requirement to simple-random - Clarify finished with first option being a hash in Readme (@henrik, #382) - Refactoring the User abstraction (@andreibondarev, #384) ## 1.4.3 (April 28th, 2016) Features: - add on_trial callback whenever a trial is started (@mtyeh411, #375) Bugfixes: - Allow algorithm configuration at experiment level (@007sumit, #376) Misc: - only choose override if it exists as valid alternative (@spheric, #377) ## 1.4.2 (April 25th, 2016) Misc: - Deprecated some legacy methods (@andreibondarev, #374) ## 1.4.1 (April 21st, 2016) Bugfixes: - respect manual start configuration after an experiment has been deleted (@mtyeh411, #372) Misc: - Introduce goals collection to reduce complexity of Experiment#save (@pakallis, #365) - Revise specs according to http://betterspecs.org/ (@hkliya, #369) ## 1.4.0 (April 2nd, 2016) Features: - Added experiment filters to dashboard (@ccallebs, #363, #364) - Added Contributor Covenant Code of Conduct ## 1.3.2 (January 2nd, 2016) Bugfixes: - Fix deleting experiments in from the updated dashboard (@craigmcnamara, #352) ## 1.3.1 (January 1st, 2016) Bugfixes: - Fix the dashboard for experiments with ‘/‘ in the name. 
(@craigmcnamara, #349) ## 1.3.0 (October 20th, 2015) Features: - allow for custom redis_url different from ENV variable (@davidgrieser, #323) - add ability to change the length of the persistence cookie (@peterylai, #335) Bugfixes: - Rescue from Redis::BaseError instead of Redis::CannotConnectError (@nfm, #342) - Fix active experiments when experiment is on a later version (@ndrisso, #331) - Fix caching of winning alternative (@nfm, #329) Misc: - Remove duplication from Experiment#save (@pakallis, #333) - Remove unnecessary argument from Experiment#write_to_alternative (@t4deu, #332) ## 1.2.1 (May 17th, 2015) Features: - Handle redis DNS resolution failures gracefully (@fusion2004, #310) - Push metadata to ab_test block (@ekorneeff, #296) - Helper methods are now private when included in controllers (@ipoval, #303) Bugfixes: - Return an empty hash as metadata when Split is disabled (@tomasdundacek, #313) - Don't use capture helper from ActionView (@tomasdundacek, #312) Misc: - Remove body "max-width" from dashboard (@xicreative, #299) - fix private for class methods (@ipoval, #301) - minor memoization fix in spec (@ipoval, #304) - Minor documentation fixes (#295, #297, #305, #308) ## 1.2.0 (January 24th, 2015) Features: - Configure redis using environment variables if available (@saratovsource , #293) - Store metadata on experiment configuration (@dekz, #291) Bugfixes: - Revert the Trial#complete! public API to support noargs (@dekz, #292) ## 1.1.0 (January 9th, 2015) Changes: - Public class methods on `Split::Experiment` (e.g., `find_or_create`) have been moved to `Split::ExperimentCatalog`. 
Features: - Decouple trial from Split::Helper (@joshdover, #286) - Helper method for Active Experiments (@blahblahblah-, #273) Misc: - Use the new travis container based infrastructure for tests (@andrew, #280) ## 1.0.0 (October 12th, 2014) Changes: - Remove support for Ruby 1.8.7 and Rails 2.3 (@qpowell, #271) ## 0.8.0 (September 25th, 2014) Features: - Added new way to calculate the probability an alternative is the winner (@caser, #266, #251) - support multiple metrics per experiment (@stevenou, #260) Bugfixes: - Avoiding call to params in EncapsulatedHelper (@afn, #257) ## 0.7.3 (September 16th, 2014) Features: - Disable all split tests via a URL parameter (@hwartig, #263) Bugfixes: - Correctly escape experiment names on dashboard (@ecaron, #265) - Handle redis connection exception error properly (@andrew, #245) ## 0.7.2 (June 12th, 2014) Features: - Show metrics on the dashboard (@swrobel, #241) Bugfixes: - Avoid nil error with ExperimentCatalog when upgrading (@danielschwartz, #253) - [SECURITY ISSUE] Only allow known alternatives as query param overrides (@ankane, #255) ## 0.7.1 (March 20th, 2014) Features: - You can now reopen experiment from the dashboard (@mikezaby, #235) Misc: - Internal code tidy up (@IanVaughan, #238) ## 0.7.0 (December 26th, 2013) Features: - Significantly improved z-score algorithm (@caser ,#221) - Better sorting of Experiments on dashboard (@wadako111, #218) Bugfixes: - Fixed start button not being displayed in some cases (@vigosan, #219) Misc: - Experiment#initialize refactoring (@nberger, #224) - Extract ExperimentStore into a seperate class (@nberger, #225) ## 0.6.6 (October 15th, 2013) Features: - Sort experiments on Dashboard so "active" ones without a winner appear first (@swrobel, #204) - Starting tests manually (@duksis, #209) Bugfixes: - Only trigger completion callback with valid Trial (@segfaultAX, #208) - Fixed bug with `resettable` when using `normalize_experiments` (@jonashuckestein, #213) Misc: - Added more bots to 
filter list (@lbeder, #214, #215, #216) ## 0.6.5 (August 23, 2013) Features: - Added Redis adapter for persisting experiments across sessions (@fengb, #203) Misc: - Expand upon algorithms section in README (@swrobel, #200) ## 0.6.4 (August 8, 2013) Features: - Add hooks for experiment deletion and resetting (@craigmcnamara, #198) - Allow Split::Helper to be used outside of a controller (@nfm, #190) - Show current Rails/Rack Env in dashboard (@rceee, #187) Bugfixes: - Fix whiplash algorithm when using goals (@swrobel, #193) Misc: - Refactor dashboard js (@buddhamagnet) ## 0.6.3 (July 8, 2013) Features: - Add hooks for Trial#choose! and Trial#complete! (@bmarini, #176) Bugfixes: - Stores and parses Experiment's start_time as a UNIX integer (@joeroot, #177) ## 0.6.2 (June 6, 2013) Features: - Rails 2.3 compatibility (@bhcarpenter, #167) - Adding possibility to store overridden alternative (@duksis, #173) Misc: - Now testing against multiple versions of rails ## 0.6.1 (May 4, 2013) Bugfixes: - Use the specified algorithm for the experiment instead of the default (@woodhull, #165) Misc: - Ensure experiements are valid when configuring (@ashmckenzie, #159) - Allow arrays to be passed to ab_test (@fenelon, #156) ## 0.6.0 (April 4, 2013) Features: - Support for Ruby 2.0.0 (@phoet, #142) - Multiple Goals (@liujin, #109) - Ignoring IPs using Regular Expressions (@waynemoore, #119) - Added ability to add more bots to the default list (@themgt, #140) - Allow custom configuration of user blocking logic (@phoet , #148) Bugfixes: - Fixed regression in handling of config files (@iangreenleaf, #115) - Fixed completion rate increases for experiments users aren't participating in (@philnash, #67) - Handle exceptions from invalid JSON in cookies (@iangreenleaf, #126) Misc: - updated minimum json version requirement - Refactor Yaml Configuration (@rtwomey, #124) - Refactoring of Experiments (@iangreenleaf @tamird, #117 #118) - Added more known Bots, including Pingdom, Bing, YandexBot 
(@julesie, @zinkkrysty, @dimko) - Improved Readme (@iangreenleaf @phoet) ## 0.5.0 (January 28, 2013) Features: - Persistence Adapters: Cookies and Session (@patbenatar, #98) - Configure experiments from a hash (@iangreenleaf, #97) - Pluggable sampling algorithms (@woodhull, #105) Bugfixes: - Fixed negative number of non-finished rates (@philnash, #83) - Fixed behaviour of finished(:reset => false) (@philnash, #88) - Only take into consideration positive z-scores (@thomasmaas, #96) - Amended ab_test method to raise ArgumentError if passed integers or symbols as alternatives (@buddhamagnet, #81) ## 0.4.6 (October 28, 2012) Features: - General code quality improvements (@buddhamagnet, #79) Bugfixes: - Don't increment the experiment counter if user has finished (@dimko, #78) - Fixed an incorrect test (@jaywengrow, #74) ## 0.4.5 (August 30, 2012) Bugfixes: - Fixed header gradient in FF/Opera (@philnash, #69) - Fixed reseting of experiment in session (@apsoto, #43) ## 0.4.4 (August 9, 2012) Features: - Allow parameter overrides, even without Redis. (@bhcarpenter, #62) Bugfixes: - Fixes version number always increasing when alternatives are changed (@philnash, #63) - updated guard-rspec to version 1.2 ## 0.4.3 (July 8, 2012) Features: - redis failover now recovers from all redis-related exceptions ## 0.4.2 (June 1, 2012) Features: - Now works with v3.0 of redis gem Bugfixes: - Fixed redis failover on Rubinius ## 0.4.1 (April 6, 2012) Features: - Added configuration option to disable Split testing (@ilyakatz, #45) Bugfixes: - Fix weights for existing experiments (@andreas, #40) - Fixed dashboard range error (@andrew, #42) ## 0.4.0 (March 7, 2012) **IMPORTANT** If using ruby 1.8.x and weighted alternatives you should always pass the control alternative through as the second argument with any other alternatives as a third argument because the order of the hash is not preserved in ruby 1.8, ruby 1.9 users are not affected by this bug. 
Features: - Experiments now record when they were started (@vrish88, #35) - Old versions of experiments in sessions are now cleaned up - Avoid users participating in multiple experiments at once (#21) Bugfixes: - Overriding alternatives doesn't work for weighted alternatives (@layflags, #34) - confidence_level helper should handle tiny z-scores (#23) ## 0.3.3 (February 16, 2012) Bugfixes: - Fixed redis failover when a block was passed to ab_test (@layflags, #33) ## 0.3.2 (February 12, 2012) Features: - Handle redis errors gracefully (@layflags, #32) ## 0.3.1 (November 19, 2011) Features: - General code tidy up (@ryanlecompte, #22, @mocoso, #28) - Lazy loading data from Redis (@lautis, #25) Bugfixes: - Handle unstarted experiments (@mocoso, #27) - Relaxed Sinatra version requirement (@martinclu, #24) ## 0.3.0 (October 9, 2011) Features: - Redesigned dashboard (@mrappleton, #17) - Use atomic increments in redis for better concurrency (@lautis, #18) - Weighted alternatives Bugfixes: - Fix to allow overriding of experiments that aren't on version 1 ## 0.2.4 (July 18, 2011) Features: - Added option to finished to not reset the users session Bugfixes: - Only allow strings as alternatives, fixes strange errors when passing true/false or symbols ## 0.2.3 (June 26, 2011) Features: - Experiments can now be deleted from the dashboard - ab_test helper now accepts a block - Improved dashboard Bugfixes: - After resetting an experiment, existing users of that experiment will also be reset ## 0.2.2 (June 11, 2011) Features: - Updated redis-namespace requirement to 1.0.3 - Added a configuration object for changing options - Robot regex can now be changed via a configuration options - Added ability to ignore visits from specified IP addresses - Dashboard now shows percentage improvement of alternatives compared to the control - If the alternatives of an experiment are changed it resets the experiment and uses the new alternatives Bugfixes: - Saving an experiment multiple times no 
longer creates duplicate alternatives ## 0.2.1 (May 29, 2011) Bugfixes: - Convert legacy sets to lists to avoid exceptions during upgrades from 0.1.x ## 0.2.0 (May 29, 2011) Features: - Override an alternative via a url parameter - Experiments can now be reset from the dashboard - The first alternative is now considered the control - General dashboard usability improvements - Robots are ignored and given the control alternative Bugfixes: - Alternatives are now store in a list rather than a set to ensure consistent ordering - Fixed diving by zero errors ## 0.1.1 (May 18, 2011) Bugfixes: - More Robust conversion rate display on dashboard - Ensure `Split::Version` is available everywhere, fixed dashboard ## 0.1.0 (May 17, 2011) Initial Release <MSG> Update CHANGELOG.md <DFF> @@ -1,3 +1,23 @@ +## Unreleased 4.0.0 + +Bugfixes: +- ab_test must return metadata on error or if split is disabled/excluded user (@andrehjr, #622) +- Fix versioned experiments when used with allow_multiple_experiments=control (@andrehjr, #613) +- Only block Pinterest bot (@huoxito, #606) +- Respect experiment defaults when loading experiments in initializer. (@mattwd7, #599) + +Features: +- Make goals accessible via on_trial_complete callbacks (@robin-phung, #625) +- Replace usage of SimpleRandom with RubyStats(Used for Beta Distribution RNG) (@andrehjr, #616) +- Introduce enable/disable experiment cohorting (@robin-phung, #615) +- Add on_experiment_winner_choose callback (@GenaMinenkov, #574) + +Misc: +- Drop support for Ruby < 2.5 (@andrehjr, #627) +- Drop support for Rails < 5 (@andrehkr, #607) +- Bump minimum required redis to 4.2 (@andrehjr, #628) +- Removed repeated loading from config (@robin-phung, #619) + ## 3.4.1 (November 12th, 2019) Bugfixes:
20
Update CHANGELOG.md
0
.md
md
mit
splitrb/split
10070830
<NME> CHANGELOG.md <BEF> ## 3.4.1 (November 12th, 2019) Bugfixes: - Bump minimum required redis to 4.2 (@andrehjr, #628) - Removed repeated loading from config (@robin-phung, #619) - Simplify RedisInterface usage when persisting Experiment alternatives (@andrehjr, #632) - Remove redis_url impl. Deprecated on version 2.2 (@andrehjr, #631) - Remove thread_safe config as redis-rb is thread_safe by default (@andrehjr, #630) - Fix typo of in `Split::Trial` class variable (TomasBarry, #644) - Single HSET to update values, instead of multiple ones (@andrehjr, #640) - Use Redis#hmset to keep compatibility with Redis < 4.0 (@andrehjr, #659) - Remove 'set' parsing for alternatives. Sets were used as storage and deprecated on 0.x (@andrehjr, #639) - Adding documentation related to what is stored on cookies. (@andrehjr, #634) - Keep railtie defined under the Split gem namespace (@avit, #666) - Update RSpec helper to support block syntax (@clowder, #665) ## 3.4.1 (November 12th, 2019) Bugfixes: - Reference ActionController directly when including split helpers, to avoid breaking Rails API Controllers (@andrehjr, #602) ## 3.4.0 (November 9th, 2019) Features: - Improve DualAdapter (@santib, #588), adds a new configuration for the DualAdapter, making it possible to keep consistency for logged_out/logged_in users. It's a opt-in flag. No Behavior was changed on this release. 
- Make dashboard pagination default "per" param configurable (@alopatin, #597) Bugfixes: - Fix `force_alternative` for experiments with incremented version (@giraffate, #568) - Persist alternative weights (@giraffate, #570) - Combined experiment performance improvements (@gnanou, #575) - Handle correctly case when ab_finished is called before ab_test for a user (@gnanou, #577) - When loading active_experiments, it should not look into user's 'finished' keys (@andrehjr, #582) Misc: - Remove `rubyforge_project` from gemspec (@giraffate, #583) - Fix URLs to replace http with https (@giraffate , #584) - Lazily include split helpers in ActionController::Base (@hasghari, #586) - Fix unused variable warnings (@andrehjr, #592) - Fix ruby warnings (@andrehjr, #593) - Update rubocop.yml config (@andrehjr, #594) - Add frozen_string_literal to all files that were missing it (@andrehjr, #595) ## 3.3.2 (April 12th, 2019) Features: - Added uptime robot to configuration.rb (@razel1982, #556) - Check to see if being run in Rails application and run in before_initialize (@husteadrobert, #555) Bugfixes: - Fix error message interpolation (@hanibash, #553) - Fix Bigdecimal warnings (@agraves, #551) - Avoid hitting up on redis for robots/excluded users. (@andrehjr, #544) - Checks for defined?(request) on Helper#exclude_visitor?. 
(@andrehjr) Misc: - Update travis to add Rails 6 (@edmilton, #559) - Fix broken specs in developement environment (@dougpetronilio, #557) ## 3.3.1 (January 11th, 2019) Features: - Filter some more bots (@janosch-x, #542) Bugfixes: - Fix Dashboard Pagination Helper typo (@cattekin, #541) - Do not storage alternative in cookie if experiment has a winner (@sadhu89, #539) - fix user participating alternative not found (@NaturalHokke, #536) Misc: - Tweak RSpec instructions (@eliotsykes, #540) - Improve README regarding rspec usage (@vermaxik, #538) ## 3.3.0 (August 13th, 2018) Features: - Added pagination for dashboard (@GeorgeGorbanev, #518) - Add Facebot crawler to list of bots (@pfeiffer, #530) - Ignore previewing requests (@pfeiffer, #531) - Fix binding of ignore_filter (@pfeiffer, #533) Bugfixes: - Fix cookie header duplication (@andrehjr, #522) Performance: - Improve performance of RedisInterface#make_list_length by using LTRIM command (@mlovic, #509) Misc: - Update development dependencies - test rails 5.2 on travis (@lostapathy, #524) - update ruby versions for travis (@lostapathy, #525) ## 3.2.0 (September 21st, 2017) Features: - Allow configuration of how often winning alternatives are recalculated (@patbl, #501) Bugfixes: - Avoid z_score numeric exception for conversion rates >1 (@cmantas, #503) - Fix combined experiments (@semanticart, #502) ## 3.1.1 (August 30th, 2017) Bugfixes: - Bring back support for ruby 1.9.3 and greater (rubygems 2.0.0 or greater now required) (@patbl, #498) Misc: - Document testing with RSpec (@eliotsykes, #495) ## 3.1.0 (August 14th, 2017) Features: - Support for combined experiments (@daviddening, #493) - Rewrite CookieAdapter to work with Rack::Request and Rack::Response directly (@andrehjr, #490) - Enumeration of a User's Experiments that Respects the db_failover Option(@MarkRoddy, #487) Bugfixes: - Blocked a few more common bot user agents (@kylerippey, #485) Misc: - Repository Audit by Maintainer.io (@RichardLitt, #484) - 
Update development dependencies - Test on ruby 2.4.1 - Test compatibility with rails 5.1 - Add uris to metadata section in gemspec ## 3.0.0 (March 30th, 2017) Features: - added block randomization algorithm and specs (@hulleywood, #475) - Add ab_record_extra_info to allow record extra info to alternative and display on dashboard. (@tranngocsam, #460) Bugfixes: - Avoid crashing on Ruby 2.4 for numeric strings (@flori, #470) - Fix issue where redis isn't required (@tomciopp , #466) Misc: - Avoid variable_size_secure_compare private method (@eliotsykes, #465) ## 2.2.0 (November 11th, 2016) **Backwards incompatible!** Redis keys are renamed. Please make sure all running tests are completed before you upgrade, as they will reset. Features: - Remove dependency on Redis::Namespace (@bschaeffer, #425) - Make resetting on experiment change optional (@moggyboy, #430) - Add ability to force alternative on dashboard (@ccallebs, #437) Bugfixes: - Fix variations reset across page loads for multiple=control and improve coverage (@Vasfed, #432) Misc: - Remove Explicit Return (@BradHudson, #441) - Update Redis config docs (@bschaeffer, #422) - Harden HTTP Basic snippet against timing attacks (@eliotsykes, #443) - Removed a couple old ruby 1.8 hacks (@andrew, #456) - Run tests on rails 5 (@andrew, #457) - Fixed a few codeclimate warnings (@andrew, #458) - Use codeclimate for test coverage (@andrew #455) ## 2.1.0 (August 8th, 2016) Features: - Support REDIS_PROVIDER variable used in Heroku (@kartikluke, #426) ## 2.0.0 (July 17th, 2016) Breaking changes: - Removed deprecated `finished` and `begin_experiment` methods - Namespaced override param to avoid potential clashes (@henrik, #398) ## 1.7.0 (June 28th, 2016) Features: - Running concurrent experiments on same endpoint/view (@karmakaze, #421) ## 1.6.0 (June 16th, 2016) Features: - Add Dual Redis(logged-in)/cookie(logged-out) persistence adapter (@karmakaze, #420) ## 1.5.0 (June 8th, 2016) Features: - Add `expire_seconds:` TTL option 
to RedisAdapter (@karmakaze, #409) - Optional custom persistence adapter (@ndelage, #411) Misc: - Use fakeredis for testing (@andrew, #412) ## 1.4.5 (June 7th, 2016) Bugfixes: - FIX Negative numbers on non-finished (@divineforest, #408) - Eliminate extra RedisAdapter hget (@karmakaze, #407) - Remove unecessary code from Experiment class (@pakallis, #391, #392, #393) Misc: - Simplify Configuration#normalized_experiments (@pakallis, #395) - Clarify test running instructions (@henrik, #397) ## 1.4.4 (May 9th, 2016) Bugfixes: - Increment participation if store override is true and no experiment key exists (@spheric, #380) Misc: - Deprecated `finished` method in favour of `ab_finished` (@andreibondarev, #389) - Added minimum version requirement to simple-random - Clarify finished with first option being a hash in Readme (@henrik, #382) - Refactoring the User abstraction (@andreibondarev, #384) ## 1.4.3 (April 28th, 2016) Features: - add on_trial callback whenever a trial is started (@mtyeh411, #375) Bugfixes: - Allow algorithm configuration at experiment level (@007sumit, #376) Misc: - only choose override if it exists as valid alternative (@spheric, #377) ## 1.4.2 (April 25th, 2016) Misc: - Deprecated some legacy methods (@andreibondarev, #374) ## 1.4.1 (April 21st, 2016) Bugfixes: - respect manual start configuration after an experiment has been deleted (@mtyeh411, #372) Misc: - Introduce goals collection to reduce complexity of Experiment#save (@pakallis, #365) - Revise specs according to http://betterspecs.org/ (@hkliya, #369) ## 1.4.0 (April 2nd, 2016) Features: - Added experiment filters to dashboard (@ccallebs, #363, #364) - Added Contributor Covenant Code of Conduct ## 1.3.2 (January 2nd, 2016) Bugfixes: - Fix deleting experiments in from the updated dashboard (@craigmcnamara, #352) ## 1.3.1 (January 1st, 2016) Bugfixes: - Fix the dashboard for experiments with ‘/‘ in the name. 
(@craigmcnamara, #349) ## 1.3.0 (October 20th, 2015) Features: - allow for custom redis_url different from ENV variable (@davidgrieser, #323) - add ability to change the length of the persistence cookie (@peterylai, #335) Bugfixes: - Rescue from Redis::BaseError instead of Redis::CannotConnectError (@nfm, #342) - Fix active experiments when experiment is on a later version (@ndrisso, #331) - Fix caching of winning alternative (@nfm, #329) Misc: - Remove duplication from Experiment#save (@pakallis, #333) - Remove unnecessary argument from Experiment#write_to_alternative (@t4deu, #332) ## 1.2.1 (May 17th, 2015) Features: - Handle redis DNS resolution failures gracefully (@fusion2004, #310) - Push metadata to ab_test block (@ekorneeff, #296) - Helper methods are now private when included in controllers (@ipoval, #303) Bugfixes: - Return an empty hash as metadata when Split is disabled (@tomasdundacek, #313) - Don't use capture helper from ActionView (@tomasdundacek, #312) Misc: - Remove body "max-width" from dashboard (@xicreative, #299) - fix private for class methods (@ipoval, #301) - minor memoization fix in spec (@ipoval, #304) - Minor documentation fixes (#295, #297, #305, #308) ## 1.2.0 (January 24th, 2015) Features: - Configure redis using environment variables if available (@saratovsource , #293) - Store metadata on experiment configuration (@dekz, #291) Bugfixes: - Revert the Trial#complete! public API to support noargs (@dekz, #292) ## 1.1.0 (January 9th, 2015) Changes: - Public class methods on `Split::Experiment` (e.g., `find_or_create`) have been moved to `Split::ExperimentCatalog`. 
Features: - Decouple trial from Split::Helper (@joshdover, #286) - Helper method for Active Experiments (@blahblahblah-, #273) Misc: - Use the new travis container based infrastructure for tests (@andrew, #280) ## 1.0.0 (October 12th, 2014) Changes: - Remove support for Ruby 1.8.7 and Rails 2.3 (@qpowell, #271) ## 0.8.0 (September 25th, 2014) Features: - Added new way to calculate the probability an alternative is the winner (@caser, #266, #251) - support multiple metrics per experiment (@stevenou, #260) Bugfixes: - Avoiding call to params in EncapsulatedHelper (@afn, #257) ## 0.7.3 (September 16th, 2014) Features: - Disable all split tests via a URL parameter (@hwartig, #263) Bugfixes: - Correctly escape experiment names on dashboard (@ecaron, #265) - Handle redis connection exception error properly (@andrew, #245) ## 0.7.2 (June 12th, 2014) Features: - Show metrics on the dashboard (@swrobel, #241) Bugfixes: - Avoid nil error with ExperimentCatalog when upgrading (@danielschwartz, #253) - [SECURITY ISSUE] Only allow known alternatives as query param overrides (@ankane, #255) ## 0.7.1 (March 20th, 2014) Features: - You can now reopen experiment from the dashboard (@mikezaby, #235) Misc: - Internal code tidy up (@IanVaughan, #238) ## 0.7.0 (December 26th, 2013) Features: - Significantly improved z-score algorithm (@caser ,#221) - Better sorting of Experiments on dashboard (@wadako111, #218) Bugfixes: - Fixed start button not being displayed in some cases (@vigosan, #219) Misc: - Experiment#initialize refactoring (@nberger, #224) - Extract ExperimentStore into a seperate class (@nberger, #225) ## 0.6.6 (October 15th, 2013) Features: - Sort experiments on Dashboard so "active" ones without a winner appear first (@swrobel, #204) - Starting tests manually (@duksis, #209) Bugfixes: - Only trigger completion callback with valid Trial (@segfaultAX, #208) - Fixed bug with `resettable` when using `normalize_experiments` (@jonashuckestein, #213) Misc: - Added more bots to 
filter list (@lbeder, #214, #215, #216) ## 0.6.5 (August 23, 2013) Features: - Added Redis adapter for persisting experiments across sessions (@fengb, #203) Misc: - Expand upon algorithms section in README (@swrobel, #200) ## 0.6.4 (August 8, 2013) Features: - Add hooks for experiment deletion and resetting (@craigmcnamara, #198) - Allow Split::Helper to be used outside of a controller (@nfm, #190) - Show current Rails/Rack Env in dashboard (@rceee, #187) Bugfixes: - Fix whiplash algorithm when using goals (@swrobel, #193) Misc: - Refactor dashboard js (@buddhamagnet) ## 0.6.3 (July 8, 2013) Features: - Add hooks for Trial#choose! and Trial#complete! (@bmarini, #176) Bugfixes: - Stores and parses Experiment's start_time as a UNIX integer (@joeroot, #177) ## 0.6.2 (June 6, 2013) Features: - Rails 2.3 compatibility (@bhcarpenter, #167) - Adding possibility to store overridden alternative (@duksis, #173) Misc: - Now testing against multiple versions of rails ## 0.6.1 (May 4, 2013) Bugfixes: - Use the specified algorithm for the experiment instead of the default (@woodhull, #165) Misc: - Ensure experiements are valid when configuring (@ashmckenzie, #159) - Allow arrays to be passed to ab_test (@fenelon, #156) ## 0.6.0 (April 4, 2013) Features: - Support for Ruby 2.0.0 (@phoet, #142) - Multiple Goals (@liujin, #109) - Ignoring IPs using Regular Expressions (@waynemoore, #119) - Added ability to add more bots to the default list (@themgt, #140) - Allow custom configuration of user blocking logic (@phoet , #148) Bugfixes: - Fixed regression in handling of config files (@iangreenleaf, #115) - Fixed completion rate increases for experiments users aren't participating in (@philnash, #67) - Handle exceptions from invalid JSON in cookies (@iangreenleaf, #126) Misc: - updated minimum json version requirement - Refactor Yaml Configuration (@rtwomey, #124) - Refactoring of Experiments (@iangreenleaf @tamird, #117 #118) - Added more known Bots, including Pingdom, Bing, YandexBot 
(@julesie, @zinkkrysty, @dimko) - Improved Readme (@iangreenleaf @phoet) ## 0.5.0 (January 28, 2013) Features: - Persistence Adapters: Cookies and Session (@patbenatar, #98) - Configure experiments from a hash (@iangreenleaf, #97) - Pluggable sampling algorithms (@woodhull, #105) Bugfixes: - Fixed negative number of non-finished rates (@philnash, #83) - Fixed behaviour of finished(:reset => false) (@philnash, #88) - Only take into consideration positive z-scores (@thomasmaas, #96) - Amended ab_test method to raise ArgumentError if passed integers or symbols as alternatives (@buddhamagnet, #81) ## 0.4.6 (October 28, 2012) Features: - General code quality improvements (@buddhamagnet, #79) Bugfixes: - Don't increment the experiment counter if user has finished (@dimko, #78) - Fixed an incorrect test (@jaywengrow, #74) ## 0.4.5 (August 30, 2012) Bugfixes: - Fixed header gradient in FF/Opera (@philnash, #69) - Fixed reseting of experiment in session (@apsoto, #43) ## 0.4.4 (August 9, 2012) Features: - Allow parameter overrides, even without Redis. (@bhcarpenter, #62) Bugfixes: - Fixes version number always increasing when alternatives are changed (@philnash, #63) - updated guard-rspec to version 1.2 ## 0.4.3 (July 8, 2012) Features: - redis failover now recovers from all redis-related exceptions ## 0.4.2 (June 1, 2012) Features: - Now works with v3.0 of redis gem Bugfixes: - Fixed redis failover on Rubinius ## 0.4.1 (April 6, 2012) Features: - Added configuration option to disable Split testing (@ilyakatz, #45) Bugfixes: - Fix weights for existing experiments (@andreas, #40) - Fixed dashboard range error (@andrew, #42) ## 0.4.0 (March 7, 2012) **IMPORTANT** If using ruby 1.8.x and weighted alternatives you should always pass the control alternative through as the second argument with any other alternatives as a third argument because the order of the hash is not preserved in ruby 1.8, ruby 1.9 users are not affected by this bug. 
Features: - Experiments now record when they were started (@vrish88, #35) - Old versions of experiments in sessions are now cleaned up - Avoid users participating in multiple experiments at once (#21) Bugfixes: - Overriding alternatives doesn't work for weighted alternatives (@layflags, #34) - confidence_level helper should handle tiny z-scores (#23) ## 0.3.3 (February 16, 2012) Bugfixes: - Fixed redis failover when a block was passed to ab_test (@layflags, #33) ## 0.3.2 (February 12, 2012) Features: - Handle redis errors gracefully (@layflags, #32) ## 0.3.1 (November 19, 2011) Features: - General code tidy up (@ryanlecompte, #22, @mocoso, #28) - Lazy loading data from Redis (@lautis, #25) Bugfixes: - Handle unstarted experiments (@mocoso, #27) - Relaxed Sinatra version requirement (@martinclu, #24) ## 0.3.0 (October 9, 2011) Features: - Redesigned dashboard (@mrappleton, #17) - Use atomic increments in redis for better concurrency (@lautis, #18) - Weighted alternatives Bugfixes: - Fix to allow overriding of experiments that aren't on version 1 ## 0.2.4 (July 18, 2011) Features: - Added option to finished to not reset the users session Bugfixes: - Only allow strings as alternatives, fixes strange errors when passing true/false or symbols ## 0.2.3 (June 26, 2011) Features: - Experiments can now be deleted from the dashboard - ab_test helper now accepts a block - Improved dashboard Bugfixes: - After resetting an experiment, existing users of that experiment will also be reset ## 0.2.2 (June 11, 2011) Features: - Updated redis-namespace requirement to 1.0.3 - Added a configuration object for changing options - Robot regex can now be changed via a configuration options - Added ability to ignore visits from specified IP addresses - Dashboard now shows percentage improvement of alternatives compared to the control - If the alternatives of an experiment are changed it resets the experiment and uses the new alternatives Bugfixes: - Saving an experiment multiple times no 
longer creates duplicate alternatives ## 0.2.1 (May 29, 2011) Bugfixes: - Convert legacy sets to lists to avoid exceptions during upgrades from 0.1.x ## 0.2.0 (May 29, 2011) Features: - Override an alternative via a url parameter - Experiments can now be reset from the dashboard - The first alternative is now considered the control - General dashboard usability improvements - Robots are ignored and given the control alternative Bugfixes: - Alternatives are now store in a list rather than a set to ensure consistent ordering - Fixed diving by zero errors ## 0.1.1 (May 18, 2011) Bugfixes: - More Robust conversion rate display on dashboard - Ensure `Split::Version` is available everywhere, fixed dashboard ## 0.1.0 (May 17, 2011) Initial Release <MSG> Update CHANGELOG.md <DFF> @@ -1,3 +1,23 @@ +## Unreleased 4.0.0 + +Bugfixes: +- ab_test must return metadata on error or if split is disabled/excluded user (@andrehjr, #622) +- Fix versioned experiments when used with allow_multiple_experiments=control (@andrehjr, #613) +- Only block Pinterest bot (@huoxito, #606) +- Respect experiment defaults when loading experiments in initializer. (@mattwd7, #599) + +Features: +- Make goals accessible via on_trial_complete callbacks (@robin-phung, #625) +- Replace usage of SimpleRandom with RubyStats(Used for Beta Distribution RNG) (@andrehjr, #616) +- Introduce enable/disable experiment cohorting (@robin-phung, #615) +- Add on_experiment_winner_choose callback (@GenaMinenkov, #574) + +Misc: +- Drop support for Ruby < 2.5 (@andrehjr, #627) +- Drop support for Rails < 5 (@andrehkr, #607) +- Bump minimum required redis to 4.2 (@andrehjr, #628) +- Removed repeated loading from config (@robin-phung, #619) + ## 3.4.1 (November 12th, 2019) Bugfixes:
20
Update CHANGELOG.md
0
.md
md
mit
splitrb/split
10070831
<NME> jquery.meow.js <BEF> (function ($, window) { 'use strict'; // Meow queue var default_meow_area, meows = { queue: {}, add: function (meow) { this.queue[meow.timestamp] = meow; }, get: function (timestamp) { return this.queue[timestamp]; }, remove: function (timestamp) { delete this.queue[timestamp]; }, size: function () { var timestamp, size = 0; for (timestamp in this.queue) { if (this.queue.hasOwnProperty(timestamp)) { size += 1; } } return size; } }, // Meow constructor Meow = function (options) { var that = this; this.timestamp = new Date().getTime(); // used to identify this meow and timeout this.hovered = false; // whether mouse is over or not if (typeof default_meow_area === 'undefined' this.message = options.message; this.icon = options.icon; this.timestamp = Date.now(); this.duration = 2400; this.hovered = false; this.manifest = {}; $('#meows').append($(document.createElement('div')) .attr('id', 'meow-' + this.timestamp) .addClass('meow') .html($(document.createElement('div')).addClass('inner').text(this.message)) .hide() .fadeIn(400)); this.container = $(options.container); } else { this.container = default_meow_area; } if (typeof options.title === 'string') { this.title = options.title; } if (typeof options.message === 'string') { this.message = options.message; } else if (options.message instanceof $) { if (options.message.is('input,textarea,select')) { this.message = options.message.val(); } else { this.message = options.message.text(); } if (typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') { this.title = options.message.attr('title'); } } if (typeof options.icon === 'string') { this.icon = options.icon; } if (options.sticky) { this.duration = Infinity; } else { this.duration = options.duration || 5000; } // Call callback if it's defined (this = meow object) if (typeof options.beforeCreate === 'function') { options.beforeCreate.call(that); } // Add the meow to the meow area 
this.container.append($(window.document.createElement('div')) .attr('id', 'meow-' + this.timestamp.toString()) .addClass('meow') .html($(window.document.createElement('div')).addClass('inner').html(this.message)) .hide() .fadeIn(400)); this.manifest = $('#meow-' + this.timestamp.toString()); title, message, icon, message_type; if (typeof options.title === 'string') { title = options.title; if (typeof that.icon === 'string') { this.manifest.find('.inner').prepend( $(window.document.createElement('div')).addClass('icon').html( $(window.document.createElement('img')).attr('src', this.icon) ) ); } // Add close button if the meow isn't uncloseable // TODO: this close button needs to be much prettier if (options.closeable !== false) { this.manifest.find('.inner').prepend( $(window.document.createElement('a')) .addClass('close') .html('&times;') .attr('href', '#close-meow-' + that.timestamp) .click(function (e) { e.preventDefault(); that.destroy(); }) ); } this.manifest.bind('mouseenter mouseleave', function (event) { if (typeof options.icon === 'string') { icon = options.icon; } return { trigger: trigger, message: message, icon: icon, message_type: message_type } }, this.timeout = window.setTimeout(function () { // Make sure this meow hasn't already been destroyed if (typeof meows.get(that.timestamp) !== 'undefined') { // Call callback if it's defined (this = meow DOM element) if (typeof options.onTimeout === 'function') { options.onTimeout.call(that.manifest); } // Don't destroy if user is hovering over meow if (that.hovered !== true && typeof that === 'object') { that.destroy(); } } }, that.duration); } this.destroy = function () { if (that.destroyed !== true) { // Call callback if it's defined (this = meow DOM element) if (typeof options.beforeDestroy === 'function') { options.beforeDestroy.call(that.manifest); } that.manifest.find('.inner').fadeTo(400, 0, function () { that.manifest.slideUp(function () { that.manifest.remove(); that.destroyed = true; 
meows.remove(that.timestamp); if (typeof options.afterDestroy === 'function') { options.afterDestroy.call(null); } if (meows.size() <= 0) { if (default_meow_area instanceof $) { default_meow_area.remove(); default_meow_area = undefined; } if (typeof options.afterDestroyLast === 'function') { options.afterDestroyLast.call(null); } } }); }); } }; }; $.fn.meow = function (args) { var meow = new Meow(args); meows.add(meow); return meow; }; $.meow = $.fn.meow; }(jQuery, window)); <MSG> configurable duration, correcting title parameter, allowing use of html inside box <DFF> @@ -33,13 +33,13 @@ this.message = options.message; this.icon = options.icon; this.timestamp = Date.now(); - this.duration = 2400; + this.duration = options.duration || 2400; this.hovered = false; this.manifest = {}; $('#meows').append($(document.createElement('div')) .attr('id', 'meow-' + this.timestamp) .addClass('meow') - .html($(document.createElement('div')).addClass('inner').text(this.message)) + .html($(document.createElement('div')).addClass('inner').html(this.message)) .hide() .fadeIn(400)); @@ -94,7 +94,8 @@ title, message, icon, - message_type; + message_type, + duration; if (typeof options.title === 'string') { title = options.title; @@ -125,10 +126,15 @@ if (typeof options.icon === 'string') { icon = options.icon; } + + duration = options.duration; + return { trigger: trigger, message: message, icon: icon, + title: title, + duration: duration, message_type: message_type } },
9
configurable duration, correcting title parameter, allowing use of html inside box
3
.js
meow
mit
zacstewart/Meow
10070832
<NME> jquery.meow.js <BEF> (function ($, window) { 'use strict'; // Meow queue var default_meow_area, meows = { queue: {}, add: function (meow) { this.queue[meow.timestamp] = meow; }, get: function (timestamp) { return this.queue[timestamp]; }, remove: function (timestamp) { delete this.queue[timestamp]; }, size: function () { var timestamp, size = 0; for (timestamp in this.queue) { if (this.queue.hasOwnProperty(timestamp)) { size += 1; } } return size; } }, // Meow constructor Meow = function (options) { var that = this; this.timestamp = new Date().getTime(); // used to identify this meow and timeout this.hovered = false; // whether mouse is over or not if (typeof default_meow_area === 'undefined' this.message = options.message; this.icon = options.icon; this.timestamp = Date.now(); this.duration = 2400; this.hovered = false; this.manifest = {}; $('#meows').append($(document.createElement('div')) .attr('id', 'meow-' + this.timestamp) .addClass('meow') .html($(document.createElement('div')).addClass('inner').text(this.message)) .hide() .fadeIn(400)); this.container = $(options.container); } else { this.container = default_meow_area; } if (typeof options.title === 'string') { this.title = options.title; } if (typeof options.message === 'string') { this.message = options.message; } else if (options.message instanceof $) { if (options.message.is('input,textarea,select')) { this.message = options.message.val(); } else { this.message = options.message.text(); } if (typeof this.title === 'undefined' && typeof options.message.attr('title') === 'string') { this.title = options.message.attr('title'); } } if (typeof options.icon === 'string') { this.icon = options.icon; } if (options.sticky) { this.duration = Infinity; } else { this.duration = options.duration || 5000; } // Call callback if it's defined (this = meow object) if (typeof options.beforeCreate === 'function') { options.beforeCreate.call(that); } // Add the meow to the meow area 
this.container.append($(window.document.createElement('div')) .attr('id', 'meow-' + this.timestamp.toString()) .addClass('meow') .html($(window.document.createElement('div')).addClass('inner').html(this.message)) .hide() .fadeIn(400)); this.manifest = $('#meow-' + this.timestamp.toString()); title, message, icon, message_type; if (typeof options.title === 'string') { title = options.title; if (typeof that.icon === 'string') { this.manifest.find('.inner').prepend( $(window.document.createElement('div')).addClass('icon').html( $(window.document.createElement('img')).attr('src', this.icon) ) ); } // Add close button if the meow isn't uncloseable // TODO: this close button needs to be much prettier if (options.closeable !== false) { this.manifest.find('.inner').prepend( $(window.document.createElement('a')) .addClass('close') .html('&times;') .attr('href', '#close-meow-' + that.timestamp) .click(function (e) { e.preventDefault(); that.destroy(); }) ); } this.manifest.bind('mouseenter mouseleave', function (event) { if (typeof options.icon === 'string') { icon = options.icon; } return { trigger: trigger, message: message, icon: icon, message_type: message_type } }, this.timeout = window.setTimeout(function () { // Make sure this meow hasn't already been destroyed if (typeof meows.get(that.timestamp) !== 'undefined') { // Call callback if it's defined (this = meow DOM element) if (typeof options.onTimeout === 'function') { options.onTimeout.call(that.manifest); } // Don't destroy if user is hovering over meow if (that.hovered !== true && typeof that === 'object') { that.destroy(); } } }, that.duration); } this.destroy = function () { if (that.destroyed !== true) { // Call callback if it's defined (this = meow DOM element) if (typeof options.beforeDestroy === 'function') { options.beforeDestroy.call(that.manifest); } that.manifest.find('.inner').fadeTo(400, 0, function () { that.manifest.slideUp(function () { that.manifest.remove(); that.destroyed = true; 
meows.remove(that.timestamp); if (typeof options.afterDestroy === 'function') { options.afterDestroy.call(null); } if (meows.size() <= 0) { if (default_meow_area instanceof $) { default_meow_area.remove(); default_meow_area = undefined; } if (typeof options.afterDestroyLast === 'function') { options.afterDestroyLast.call(null); } } }); }); } }; }; $.fn.meow = function (args) { var meow = new Meow(args); meows.add(meow); return meow; }; $.meow = $.fn.meow; }(jQuery, window)); <MSG> configurable duration, correcting title parameter, allowing use of html inside box <DFF> @@ -33,13 +33,13 @@ this.message = options.message; this.icon = options.icon; this.timestamp = Date.now(); - this.duration = 2400; + this.duration = options.duration || 2400; this.hovered = false; this.manifest = {}; $('#meows').append($(document.createElement('div')) .attr('id', 'meow-' + this.timestamp) .addClass('meow') - .html($(document.createElement('div')).addClass('inner').text(this.message)) + .html($(document.createElement('div')).addClass('inner').html(this.message)) .hide() .fadeIn(400)); @@ -94,7 +94,8 @@ title, message, icon, - message_type; + message_type, + duration; if (typeof options.title === 'string') { title = options.title; @@ -125,10 +126,15 @@ if (typeof options.icon === 'string') { icon = options.icon; } + + duration = options.duration; + return { trigger: trigger, message: message, icon: icon, + title: title, + duration: duration, message_type: message_type } },
9
configurable duration, correcting title parameter, allowing use of html inside box
3
.js
meow
mit
zacstewart/Meow
10070833
<NME> user_spec.rb <BEF> require 'spec_helper' require 'split/experiment_catalog' require 'split/experiment' require "split/user" describe Split::User do let(:user_keys) { { "link_color" => "blue" } } let(:context) { double(session: { split: user_keys }) } let(:experiment) { Split::Experiment.new("link_color") } before(:each) do @subject = described_class.new(context) end it "delegates methods correctly" do expect(@subject["link_color"]).to eq(@subject.user["link_color"]) end context "#cleanup_old_versions!" do let(:experiment_version) { "#{experiment.name}:1" } let(:second_experiment_version) { "#{experiment.name}_another:1" } let(:third_experiment_version) { "variation_of_#{experiment.name}:1" } let(:user_keys) do { experiment_version => "blue", second_experiment_version => "red", third_experiment_version => "yellow" } end before(:each) { @subject.cleanup_old_versions!(experiment) } it "removes key if old experiment is found" do expect(@subject.keys).not_to include(experiment_version) end it 'does not remove other keys' do expect(@subject.keys).to include(second_experiment_version, third_experiment_version) end end context '#cleanup_old_experiments!' do it 'removes key if experiment is not found' do @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(true) @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has not started yet" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! 
expect(@subject.keys).to be_empty end context "with finished key" do let(:user_keys) { { "link_color" => "blue", "link_color:finished" => true } } it "does not remove finished key for experiment without a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(Split::ExperimentCatalog).to receive(:find).with("link_color:finished").and_return(nil) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! expect(@subject.keys).to include("link_color") expect(@subject.keys).to include("link_color:finished") end end context "when already cleaned up" do before do @subject.cleanup_old_experiments! end it "does not clean up again" do expect(@subject).to_not receive(:keys_without_finished) @subject.cleanup_old_experiments! end end end context "allows user to be loaded from adapter" do it "loads user from adapter (RedisAdapter)" do user = Split::Persistence::RedisAdapter.new(nil, 112233) user["foo"] = "bar" ab_user = Split::User.find(112233, :redis) expect(ab_user["foo"]).to eql("bar") end it "returns nil if adapter does not implement a finder method" do ab_user = Split::User.find(112233, :dual_adapter) expect(ab_user).to be_nil end end context "instantiated with custom adapter" do let(:custom_adapter) { double(:persistence_adapter) } before do @subject = described_class.new(context, custom_adapter) end it "sets user to the custom adapter" do expect(@subject.user).to eq(custom_adapter) end end end <MSG> Merge pull request #679 from splitrb/fix-layout-offenses Fix all Layout issues on the project <DFF> @@ -1,3 +1,5 @@ +# frozen_string_literal: true + require 'spec_helper' require 'split/experiment_catalog' require 'split/experiment' @@ -37,7 +39,7 @@ describe Split::User do it 'does not remove other keys' do expect(@subject.keys).to include(second_experiment_version, third_experiment_version) end - end + end context 
'#cleanup_old_experiments!' do it 'removes key if experiment is not found' do @@ -100,7 +102,6 @@ describe Split::User do ab_user = Split::User.find(112233, :dual_adapter) expect(ab_user).to be_nil end - end context "instantiated with custom adapter" do @@ -114,5 +115,4 @@ describe Split::User do expect(@subject.user).to eq(custom_adapter) end end - end
3
Merge pull request #679 from splitrb/fix-layout-offenses
3
.rb
rb
mit
splitrb/split
10070834
<NME> user_spec.rb <BEF> require 'spec_helper' require 'split/experiment_catalog' require 'split/experiment' require "split/user" describe Split::User do let(:user_keys) { { "link_color" => "blue" } } let(:context) { double(session: { split: user_keys }) } let(:experiment) { Split::Experiment.new("link_color") } before(:each) do @subject = described_class.new(context) end it "delegates methods correctly" do expect(@subject["link_color"]).to eq(@subject.user["link_color"]) end context "#cleanup_old_versions!" do let(:experiment_version) { "#{experiment.name}:1" } let(:second_experiment_version) { "#{experiment.name}_another:1" } let(:third_experiment_version) { "variation_of_#{experiment.name}:1" } let(:user_keys) do { experiment_version => "blue", second_experiment_version => "red", third_experiment_version => "yellow" } end before(:each) { @subject.cleanup_old_versions!(experiment) } it "removes key if old experiment is found" do expect(@subject.keys).not_to include(experiment_version) end it 'does not remove other keys' do expect(@subject.keys).to include(second_experiment_version, third_experiment_version) end end context '#cleanup_old_experiments!' do it 'removes key if experiment is not found' do @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(true) @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has not started yet" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! 
expect(@subject.keys).to be_empty end context "with finished key" do let(:user_keys) { { "link_color" => "blue", "link_color:finished" => true } } it "does not remove finished key for experiment without a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(Split::ExperimentCatalog).to receive(:find).with("link_color:finished").and_return(nil) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! expect(@subject.keys).to include("link_color") expect(@subject.keys).to include("link_color:finished") end end context "when already cleaned up" do before do @subject.cleanup_old_experiments! end it "does not clean up again" do expect(@subject).to_not receive(:keys_without_finished) @subject.cleanup_old_experiments! end end end context "allows user to be loaded from adapter" do it "loads user from adapter (RedisAdapter)" do user = Split::Persistence::RedisAdapter.new(nil, 112233) user["foo"] = "bar" ab_user = Split::User.find(112233, :redis) expect(ab_user["foo"]).to eql("bar") end it "returns nil if adapter does not implement a finder method" do ab_user = Split::User.find(112233, :dual_adapter) expect(ab_user).to be_nil end end context "instantiated with custom adapter" do let(:custom_adapter) { double(:persistence_adapter) } before do @subject = described_class.new(context, custom_adapter) end it "sets user to the custom adapter" do expect(@subject.user).to eq(custom_adapter) end end end <MSG> Merge pull request #679 from splitrb/fix-layout-offenses Fix all Layout issues on the project <DFF> @@ -1,3 +1,5 @@ +# frozen_string_literal: true + require 'spec_helper' require 'split/experiment_catalog' require 'split/experiment' @@ -37,7 +39,7 @@ describe Split::User do it 'does not remove other keys' do expect(@subject.keys).to include(second_experiment_version, third_experiment_version) end - end + end context 
'#cleanup_old_experiments!' do it 'removes key if experiment is not found' do @@ -100,7 +102,6 @@ describe Split::User do ab_user = Split::User.find(112233, :dual_adapter) expect(ab_user).to be_nil end - end context "instantiated with custom adapter" do @@ -114,5 +115,4 @@ describe Split::User do expect(@subject.user).to eq(custom_adapter) end end - end
3
Merge pull request #679 from splitrb/fix-layout-offenses
3
.rb
rb
mit
splitrb/split
10070835
<NME> user_spec.rb <BEF> require 'spec_helper' require 'split/experiment_catalog' require 'split/experiment' require "split/user" describe Split::User do let(:user_keys) { { "link_color" => "blue" } } let(:context) { double(session: { split: user_keys }) } let(:experiment) { Split::Experiment.new("link_color") } before(:each) do @subject = described_class.new(context) end it "delegates methods correctly" do expect(@subject["link_color"]).to eq(@subject.user["link_color"]) end context "#cleanup_old_versions!" do let(:experiment_version) { "#{experiment.name}:1" } let(:second_experiment_version) { "#{experiment.name}_another:1" } let(:third_experiment_version) { "variation_of_#{experiment.name}:1" } let(:user_keys) do { experiment_version => "blue", second_experiment_version => "red", third_experiment_version => "yellow" } end before(:each) { @subject.cleanup_old_versions!(experiment) } it "removes key if old experiment is found" do expect(@subject.keys).not_to include(experiment_version) end it 'does not remove other keys' do expect(@subject.keys).to include(second_experiment_version, third_experiment_version) end end context '#cleanup_old_experiments!' do it 'removes key if experiment is not found' do @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(true) @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has not started yet" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! 
expect(@subject.keys).to be_empty end context "with finished key" do let(:user_keys) { { "link_color" => "blue", "link_color:finished" => true } } it "does not remove finished key for experiment without a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(Split::ExperimentCatalog).to receive(:find).with("link_color:finished").and_return(nil) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! expect(@subject.keys).to include("link_color") expect(@subject.keys).to include("link_color:finished") end end context "when already cleaned up" do before do @subject.cleanup_old_experiments! end it "does not clean up again" do expect(@subject).to_not receive(:keys_without_finished) @subject.cleanup_old_experiments! end end end context "allows user to be loaded from adapter" do it "loads user from adapter (RedisAdapter)" do user = Split::Persistence::RedisAdapter.new(nil, 112233) user["foo"] = "bar" ab_user = Split::User.find(112233, :redis) expect(ab_user["foo"]).to eql("bar") end it "returns nil if adapter does not implement a finder method" do ab_user = Split::User.find(112233, :dual_adapter) expect(ab_user).to be_nil end end context "instantiated with custom adapter" do let(:custom_adapter) { double(:persistence_adapter) } before do @subject = described_class.new(context, custom_adapter) end it "sets user to the custom adapter" do expect(@subject.user).to eq(custom_adapter) end end end <MSG> Merge pull request #679 from splitrb/fix-layout-offenses Fix all Layout issues on the project <DFF> @@ -1,3 +1,5 @@ +# frozen_string_literal: true + require 'spec_helper' require 'split/experiment_catalog' require 'split/experiment' @@ -37,7 +39,7 @@ describe Split::User do it 'does not remove other keys' do expect(@subject.keys).to include(second_experiment_version, third_experiment_version) end - end + end context 
'#cleanup_old_experiments!' do it 'removes key if experiment is not found' do @@ -100,7 +102,6 @@ describe Split::User do ab_user = Split::User.find(112233, :dual_adapter) expect(ab_user).to be_nil end - end context "instantiated with custom adapter" do @@ -114,5 +115,4 @@ describe Split::User do expect(@subject.user).to eq(custom_adapter) end end - end
3
Merge pull request #679 from splitrb/fix-layout-offenses
3
.rb
rb
mit
splitrb/split
10070836
<NME> user_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "split/experiment_catalog" describe Split::User do let(:user_keys) { { 'link_color' => 'blue' } } let(:context) { double(:session => { split: user_keys }) } let(:experiment) { Split::Experiment.new('link_color') } before(:each) do before(:each) do @subject = described_class.new(context) end it "delegates methods correctly" do expect(@subject["link_color"]).to eq(@subject.user["link_color"]) end context "#cleanup_old_versions!" do let(:experiment_version) { "#{experiment.name}:1" } let(:second_experiment_version) { "#{experiment.name}_another:1" } let(:third_experiment_version) { "variation_of_#{experiment.name}:1" } let(:user_keys) do { experiment_version => "blue", second_experiment_version => "red", third_experiment_version => "yellow" } end before(:each) { @subject.cleanup_old_versions!(experiment) } it "removes key if old experiment is found" do expect(@subject.keys).not_to include(experiment_version) end it "does not remove other keys" do expect(@subject.keys).to include(second_experiment_version, third_experiment_version) end end context "#cleanup_old_experiments!" do it "removes key if experiment is not found" do @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(true) @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has not started yet" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! 
expect(@subject.keys).to be_empty end context "with finished key" do let(:user_keys) { { "link_color" => "blue", "link_color:finished" => true } } it "does not remove finished key for experiment without a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(Split::ExperimentCatalog).to receive(:find).with("link_color:finished").and_return(nil) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! expect(@subject.keys).to include("link_color") expect(@subject.keys).to include("link_color:finished") end end context "when already cleaned up" do before do @subject.cleanup_old_experiments! end it "does not clean up again" do expect(@subject).to_not receive(:keys_without_finished) @subject.cleanup_old_experiments! end end end context "allows user to be loaded from adapter" do it "loads user from adapter (RedisAdapter)" do user = Split::Persistence::RedisAdapter.new(nil, 112233) user["foo"] = "bar" ab_user = Split::User.find(112233, :redis) expect(ab_user["foo"]).to eql("bar") end it "returns nil if adapter does not implement a finder method" do ab_user = Split::User.find(112233, :dual_adapter) expect(ab_user).to be_nil end end context "instantiated with custom adapter" do let(:custom_adapter) { double(:persistence_adapter) } before do @subject = described_class.new(context, custom_adapter) end it "sets user to the custom adapter" do expect(@subject.user).to eq(custom_adapter) end end end <MSG> Merge pull request #678 from splitrb/fix-hash-syntax-offenses Fix Style/HashSyntax offenses <DFF> @@ -5,7 +5,7 @@ require 'split/user' describe Split::User do let(:user_keys) { { 'link_color' => 'blue' } } - let(:context) { double(:session => { split: user_keys }) } + let(:context) { double(session: { split: user_keys }) } let(:experiment) { Split::Experiment.new('link_color') } before(:each) do
1
Merge pull request #678 from splitrb/fix-hash-syntax-offenses
1
.rb
rb
mit
splitrb/split
10070837
<NME> user_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "split/experiment_catalog" describe Split::User do let(:user_keys) { { 'link_color' => 'blue' } } let(:context) { double(:session => { split: user_keys }) } let(:experiment) { Split::Experiment.new('link_color') } before(:each) do before(:each) do @subject = described_class.new(context) end it "delegates methods correctly" do expect(@subject["link_color"]).to eq(@subject.user["link_color"]) end context "#cleanup_old_versions!" do let(:experiment_version) { "#{experiment.name}:1" } let(:second_experiment_version) { "#{experiment.name}_another:1" } let(:third_experiment_version) { "variation_of_#{experiment.name}:1" } let(:user_keys) do { experiment_version => "blue", second_experiment_version => "red", third_experiment_version => "yellow" } end before(:each) { @subject.cleanup_old_versions!(experiment) } it "removes key if old experiment is found" do expect(@subject.keys).not_to include(experiment_version) end it "does not remove other keys" do expect(@subject.keys).to include(second_experiment_version, third_experiment_version) end end context "#cleanup_old_experiments!" do it "removes key if experiment is not found" do @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(true) @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has not started yet" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! 
expect(@subject.keys).to be_empty end context "with finished key" do let(:user_keys) { { "link_color" => "blue", "link_color:finished" => true } } it "does not remove finished key for experiment without a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(Split::ExperimentCatalog).to receive(:find).with("link_color:finished").and_return(nil) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! expect(@subject.keys).to include("link_color") expect(@subject.keys).to include("link_color:finished") end end context "when already cleaned up" do before do @subject.cleanup_old_experiments! end it "does not clean up again" do expect(@subject).to_not receive(:keys_without_finished) @subject.cleanup_old_experiments! end end end context "allows user to be loaded from adapter" do it "loads user from adapter (RedisAdapter)" do user = Split::Persistence::RedisAdapter.new(nil, 112233) user["foo"] = "bar" ab_user = Split::User.find(112233, :redis) expect(ab_user["foo"]).to eql("bar") end it "returns nil if adapter does not implement a finder method" do ab_user = Split::User.find(112233, :dual_adapter) expect(ab_user).to be_nil end end context "instantiated with custom adapter" do let(:custom_adapter) { double(:persistence_adapter) } before do @subject = described_class.new(context, custom_adapter) end it "sets user to the custom adapter" do expect(@subject.user).to eq(custom_adapter) end end end <MSG> Merge pull request #678 from splitrb/fix-hash-syntax-offenses Fix Style/HashSyntax offenses <DFF> @@ -5,7 +5,7 @@ require 'split/user' describe Split::User do let(:user_keys) { { 'link_color' => 'blue' } } - let(:context) { double(:session => { split: user_keys }) } + let(:context) { double(session: { split: user_keys }) } let(:experiment) { Split::Experiment.new('link_color') } before(:each) do
1
Merge pull request #678 from splitrb/fix-hash-syntax-offenses
1
.rb
rb
mit
splitrb/split
10070838
<NME> user_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" require "split/experiment_catalog" describe Split::User do let(:user_keys) { { 'link_color' => 'blue' } } let(:context) { double(:session => { split: user_keys }) } let(:experiment) { Split::Experiment.new('link_color') } before(:each) do before(:each) do @subject = described_class.new(context) end it "delegates methods correctly" do expect(@subject["link_color"]).to eq(@subject.user["link_color"]) end context "#cleanup_old_versions!" do let(:experiment_version) { "#{experiment.name}:1" } let(:second_experiment_version) { "#{experiment.name}_another:1" } let(:third_experiment_version) { "variation_of_#{experiment.name}:1" } let(:user_keys) do { experiment_version => "blue", second_experiment_version => "red", third_experiment_version => "yellow" } end before(:each) { @subject.cleanup_old_versions!(experiment) } it "removes key if old experiment is found" do expect(@subject.keys).not_to include(experiment_version) end it "does not remove other keys" do expect(@subject.keys).to include(second_experiment_version, third_experiment_version) end end context "#cleanup_old_experiments!" do it "removes key if experiment is not found" do @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(true) @subject.cleanup_old_experiments! expect(@subject.keys).to be_empty end it "removes key if experiment has not started yet" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! 
expect(@subject.keys).to be_empty end context "with finished key" do let(:user_keys) { { "link_color" => "blue", "link_color:finished" => true } } it "does not remove finished key for experiment without a winner" do allow(Split::ExperimentCatalog).to receive(:find).with("link_color").and_return(experiment) allow(Split::ExperimentCatalog).to receive(:find).with("link_color:finished").and_return(nil) allow(experiment).to receive(:start_time).and_return(Date.today) allow(experiment).to receive(:has_winner?).and_return(false) @subject.cleanup_old_experiments! expect(@subject.keys).to include("link_color") expect(@subject.keys).to include("link_color:finished") end end context "when already cleaned up" do before do @subject.cleanup_old_experiments! end it "does not clean up again" do expect(@subject).to_not receive(:keys_without_finished) @subject.cleanup_old_experiments! end end end context "allows user to be loaded from adapter" do it "loads user from adapter (RedisAdapter)" do user = Split::Persistence::RedisAdapter.new(nil, 112233) user["foo"] = "bar" ab_user = Split::User.find(112233, :redis) expect(ab_user["foo"]).to eql("bar") end it "returns nil if adapter does not implement a finder method" do ab_user = Split::User.find(112233, :dual_adapter) expect(ab_user).to be_nil end end context "instantiated with custom adapter" do let(:custom_adapter) { double(:persistence_adapter) } before do @subject = described_class.new(context, custom_adapter) end it "sets user to the custom adapter" do expect(@subject.user).to eq(custom_adapter) end end end <MSG> Merge pull request #678 from splitrb/fix-hash-syntax-offenses Fix Style/HashSyntax offenses <DFF> @@ -5,7 +5,7 @@ require 'split/user' describe Split::User do let(:user_keys) { { 'link_color' => 'blue' } } - let(:context) { double(:session => { split: user_keys }) } + let(:context) { double(session: { split: user_keys }) } let(:experiment) { Split::Experiment.new('link_color') } before(:each) do
1
Merge pull request #678 from splitrb/fix-hash-syntax-offenses
1
.rb
rb
mit
splitrb/split
10070839
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count 
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to 
eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } 
expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) 
count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", 
"two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already 
finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) "shared/#{alternative}" end.should eq('shared/blue') end end describe 'finished' do end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", 
"alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: 
[ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", 
"5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") 
expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", 
"link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = 
ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") 
expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to 
receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" 
do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| 
[a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") 
experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Allow parameter overrides, even without Redis. With this option, applications can be tested to ensure that the options render correctly, even without Redis installed and running on the development machines. 
<DFF> @@ -410,6 +410,27 @@ describe Split::Helper do "shared/#{alternative}" end.should eq('shared/blue') end + + context 'and db_failover_allow_parameter_override config option is turned on' do + before(:each) do + Split.configure do |config| + config.db_failover_allow_parameter_override = true + end + end + + context 'and given an override parameter' do + it 'should use given override instead of the first alternative' do + @params = {'link_color' => 'red'} + ab_test('link_color', 'blue', 'red').should eq('red') + ab_test('link_color', 'blue', 'red', 'green').should eq('red') + ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2).should eq('red') + ab_test('link_color', {'blue' => 0.8}, {'red' => 20}).should eq('red') + ab_test('link_color', 'blue', 'red') do |alternative| + "shared/#{alternative}" + end.should eq('shared/red') + end + end + end end describe 'finished' do
21
Allow parameter overrides, even without Redis.
0
.rb
rb
mit
splitrb/split
10070840
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count 
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to 
eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } 
expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) 
count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", 
"two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already 
finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) "shared/#{alternative}" end.should eq('shared/blue') end end describe 'finished' do end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", 
"alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: 
[ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", 
"5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") 
expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", 
"link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = 
ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") 
expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to 
receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" 
do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| 
[a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") 
experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Allow parameter overrides, even without Redis. With this option, applications can be tested to ensure that the options render correctly, even without Redis installed and running on the development machines. 
<DFF> @@ -410,6 +410,27 @@ describe Split::Helper do "shared/#{alternative}" end.should eq('shared/blue') end + + context 'and db_failover_allow_parameter_override config option is turned on' do + before(:each) do + Split.configure do |config| + config.db_failover_allow_parameter_override = true + end + end + + context 'and given an override parameter' do + it 'should use given override instead of the first alternative' do + @params = {'link_color' => 'red'} + ab_test('link_color', 'blue', 'red').should eq('red') + ab_test('link_color', 'blue', 'red', 'green').should eq('red') + ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2).should eq('red') + ab_test('link_color', {'blue' => 0.8}, {'red' => 20}).should eq('red') + ab_test('link_color', 'blue', 'red') do |alternative| + "shared/#{alternative}" + end.should eq('shared/red') + end + end + end end describe 'finished' do
21
Allow parameter overrides, even without Redis.
0
.rb
rb
mit
splitrb/split
10070841
<NME> helper_spec.rb <BEF> # frozen_string_literal: true require "spec_helper" # TODO change some of these tests to use Rack::Test describe Split::Helper do include Split::Helper let(:experiment) { Split::ExperimentCatalog.find_or_create("link_color", "blue", "red") } describe "ab_test" do it "should not raise an error when passed strings for alternatives" do expect { ab_test("xyz", "1", "2", "3") }.not_to raise_error end it "should not raise an error when passed an array for alternatives" do expect { ab_test("xyz", ["1", "2", "3"]) }.not_to raise_error end it "should raise the appropriate error when passed integers for alternatives" do expect { ab_test("xyz", 1, 2, 3) }.to raise_error(ArgumentError) end it "should raise the appropriate error when passed symbols for alternatives" do expect { ab_test("xyz", :a, :b, :c) }.to raise_error(ArgumentError) end it "should not raise error when passed an array for goals" do expect { ab_test({ "link_color" => ["purchase", "refund"] }, "blue", "red") }.not_to raise_error end it "should not raise error when passed just one goal" do expect { ab_test({ "link_color" => "purchase" }, "blue", "red") }.not_to raise_error end it "raises an appropriate error when processing combined expirements" do Split.configuration.experiments = { combined_exp_1: { alternatives: [ { name: "control", percent: 50 }, { name: "test-alt", percent: 50 } ], metric: :my_metric, combined_experiments: [:combined_exp_1_sub_1] } } Split::ExperimentCatalog.find_or_create("combined_exp_1") expect { ab_test("combined_exp_1") }.to raise_error(Split::InvalidExperimentsFormatError) end it "should assign a random alternative to a new user when there are an equal number of alternatives assigned" do ab_test("link_color", "blue", "red") expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should increment the participation counter after assignment to a new user" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count 
previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count + 1) end it "should not increment the counter for an experiment that the user is not participating in" do ab_test("link_color", "blue", "red") e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { # User shouldn't participate in this second experiment ab_test("button_size", "small", "big") }.not_to change { e.participant_count } end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should not increment the counter for an not started experiment" do expect(Split.configuration).to receive(:start_manually).and_return(true) e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") expect { a = ab_test("button_size", "small", "big") expect(a).to eq("small") }.not_to change { e.participant_count } end it "should return the given alternative for an existing user" do expect(ab_test("link_color", "blue", "red")).to eq ab_test("link_color", "blue", "red") end it "should always return the winner if one is present" do experiment.winner = "orange" expect(ab_test("link_color", "blue", "red")).to eq("orange") end it "should allow the alternative to be forced by passing it in the params" do # ?ab_test[link_color]=blue @params = { "ab_test" => { "link_color" => "blue" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to 
eq("blue") @params = { "ab_test" => { "link_color" => "red" } } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("red") alternative = ab_test("link_color", { "blue" => 5 }, "red" => 1) expect(alternative).to eq("red") end it "should not allow an arbitrary alternative" do @params = { "ab_test" => { "link_color" => "pink" } } alternative = ab_test("link_color", "blue") expect(alternative).to eq("blue") end it "should not store the split when a param forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end it "SPLIT_DISABLE query parameter should also force the alternative (uses control)" do @params = { "SPLIT_DISABLE" => "true" } alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq("blue") alternative = ab_test("link_color", { "blue" => 1 }, "red" => 5) expect(alternative).to eq("blue") alternative = ab_test("link_color", "red", "blue") expect(alternative).to eq("red") alternative = ab_test("link_color", { "red" => 5 }, "blue" => 1) expect(alternative).to eq("red") end it "should not store the split when Split generically disabled" do @params = { "SPLIT_DISABLE" => "true" } expect(ab_user).not_to receive(:[]=) ab_test("link_color", "blue", "red") end context "when store_override is set" do before { Split.configuration.store_override = true } it "should store the forced alternative" do @params = { "ab_test" => { "link_color" => "blue" } } expect(ab_user).to receive(:[]=).with("link_color", "blue") ab_test("link_color", "blue", "red") end end context "when on_trial_choose is set" do before { Split.configuration.on_trial_choose = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_test("link_color", "blue", "red") end end it "should allow passing a block" do alt = ab_test("link_color", "blue", "red") ret = ab_test("link_color", "blue", "red") { |alternative| "shared/#{alternative}" } 
expect(ret).to eq("shared/#{alt}") end it "should allow the share of visitors see an alternative to be specified" do ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 }) expect(["red", "blue"]).to include(ab_user["link_color"]) end it "should allow alternative weighting interface as a single hash" do ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.alternatives.map(&:name)).to eq(["blue", "red"]) expect(experiment.alternatives.collect { |a| a.weight }).to match_array([0.01, 0.2]) end it "should only let a user participate in one experiment at a time" do link_color = ab_test("link_color", "blue", "red") ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) big = Split::Alternative.new("big", "button_size") expect(big.participant_count).to eq(0) small = Split::Alternative.new("small", "button_size") expect(small.participant_count).to eq(0) end it "should let a user participate in many experiment with allow_multiple_experiments option" do Split.configure do |config| config.allow_multiple_experiments = true end link_color = ab_test("link_color", "blue", "red") button_size = ab_test("button_size", "small", "big") expect(ab_user["link_color"]).to eq(link_color) expect(ab_user["button_size"]).to eq(button_size) button_size_alt = Split::Alternative.new(button_size, "button_size") expect(button_size_alt.participant_count).to eq(1) end context "with allow_multiple_experiments = 'control'" do it "should let a user participate in many experiment with one non-'control' alternative" do Split.configure do |config| config.allow_multiple_experiments = "control" end groups = 100.times.map do |n| ab_test("test#{n}".to_sym, { "control" => (100 - n) }, { "test#{n}-alt" => n }) end experiments = ab_user.active_experiments expect(experiments.size).to be > 1 count_control = experiments.values.count { |g| g == "control" } expect(count_control).to eq(experiments.size - 1) 
count_alts = groups.count { |g| g != "control" } expect(count_alts).to eq(1) end context "when user already has experiment" do let(:mock_user) { Split::User.new(self, { "test_0" => "test-alt" }) } before do Split.configure do |config| config.allow_multiple_experiments = "control" end Split::ExperimentCatalog.find_or_initialize("test_0", "control", "test-alt").save Split::ExperimentCatalog.find_or_initialize("test_1", "control", "test-alt").save end it "should restore previously selected alternative" do expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 1 }, { "test-alt" => 100 })).to eq "test-alt" end it "should select the correct alternatives after experiment resets" do experiment = Split::ExperimentCatalog.find(:test_0) experiment.reset mock_user[experiment.key] = "test-alt" expect(ab_user.active_experiments.size).to eq 1 expect(ab_test(:test_0, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "test-alt" end it "lets override existing choice" do pending "this requires user store reset on first call not depending on whelther it is current trial" @params = { "ab_test" => { "test_1" => "test-alt" } } expect(ab_test(:test_0, { "control" => 0 }, { "test-alt" => 100 })).to eq "control" expect(ab_test(:test_1, { "control" => 100 }, { "test-alt" => 1 })).to eq "test-alt" end end end it "should not over-write a finished key when an experiment is on a later version" do experiment.increment_version ab_user = { experiment.key => "blue", experiment.finished_key => true } finished_session = ab_user.dup ab_test("link_color", "blue", "red") expect(ab_user).to eq(finished_session) end end describe "metadata" do context "is defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: { "one" => "Meta1", 
"two" => "Meta2" } } } end it "should be passed to helper block" do @params = { "ab_test" => { "my_experiment" => "two" } } expect(ab_test("my_experiment")).to eq "two" expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq("Meta2") end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment")).to eq "one" expect(ab_test("my_experiment") do |_, meta| meta end).to eq("Meta1") end end context "is not defined" do before do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, metadata: nil } } end it "should be passed to helper block" do expect(ab_test("my_experiment") do |alternative, meta| meta end).to eq({}) end it "should pass control metadata helper block if library disabled" do Split.configure do |config| config.enabled = false end expect(ab_test("my_experiment") do |_, meta| meta end).to eq({}) end end end describe "ab_finished" do context "for an experiment that the user participates in" do before(:each) do @experiment_name = "link_color" @alternatives = ["blue", "red"] @experiment = Split::ExperimentCatalog.find_or_create(@experiment_name, *@alternatives) @alternative_name = ab_test(@experiment_name, *@alternatives) @previous_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count end it "should increment the counter for the completed alternative" do ab_finished(@experiment_name) new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should set experiment's finished key if reset is false" do ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should not increment the counter if reset is false and the experiment has been already 
finished" do 2.times { ab_finished(@experiment_name, { reset: false }) } new_completion_count = Split::Alternative.new(@alternative_name, @experiment_name).completed_count expect(new_completion_count).to eq(@previous_completion_count + 1) end it "should not increment the counter for an ended experiment" do e = Split::ExperimentCatalog.find_or_create("button_size", "small", "big") e.winner = "small" a = ab_test("button_size", "small", "big") expect(a).to eq("small") expect { ab_finished("button_size") }.not_to change { Split::Alternative.new(a, "button_size").completed_count } end it "should clear out the user's participation from their session" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should not clear out the users session if reset is false" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name, { reset: false }) expect(ab_user[@experiment.key]).to eq(@alternative_name) expect(ab_user[@experiment.finished_key]).to eq(true) end it "should reset the users session when experiment is not versioned" do expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end it "should reset the users session when experiment is versioned" do @experiment.increment_version @alternative_name = ab_test(@experiment_name, *@alternatives) expect(ab_user[@experiment.key]).to eq(@alternative_name) ab_finished(@experiment_name) expect(ab_user.keys).to be_empty end context "when on_trial_complete is set" do before { Split.configuration.on_trial_complete = :some_method } it "should call the method" do expect(self).to receive(:some_method) ab_finished(@experiment_name) "shared/#{alternative}" end.should eq('shared/blue') end end describe 'finished' do end context "for an experiment that the user does not participate in" do before do Split::ExperimentCatalog.find_or_create(:not_started_experiment, "control", 
"alt") end it "should not raise an exception" do expect { ab_finished(:not_started_experiment) }.not_to raise_exception end it "should not change the user state when reset is false" do expect { ab_finished(:not_started_experiment, reset: false) }.not_to change { ab_user.keys }.from([]) end it "should not change the user state when reset is true" do expect(self).not_to receive(:reset!) ab_finished(:not_started_experiment) end it "should not increment the completed counter" do ab_finished(:not_started_experiment) expect(Split::Alternative.new("control", :not_started_experiment).completed_count).to eq(0) expect(Split::Alternative.new("alt", :not_started_experiment).completed_count).to eq(0) end end end context "finished with config" do it "passes reset option" do Split.configuration.experiments = { my_experiment: { alternatives: ["one", "two"], resettable: false, } } alternative = ab_test(:my_experiment) experiment = Split::ExperimentCatalog.find :my_experiment ab_finished :my_experiment expect(ab_user[experiment.key]).to eq(alternative) expect(ab_user[experiment.finished_key]).to eq(true) end end context "finished with metric name" do before { Split.configuration.experiments = {} } before { expect(Split::Alternative).to receive(:new).at_least(1).times.and_call_original } def should_finish_experiment(experiment_name, should_finish = true) alts = Split.configuration.experiments[experiment_name][:alternatives] experiment = Split::ExperimentCatalog.find_or_create(experiment_name, *alts) alt_name = ab_user[experiment.key] = alts.first alt = double("alternative") expect(alt).to receive(:name).at_most(1).times.and_return(alt_name) expect(Split::Alternative).to receive(:new).at_most(1).times.with(alt_name, experiment_name.to_s).and_return(alt) if should_finish expect(alt).to receive(:increment_completion).at_most(1).times else expect(alt).not_to receive(:increment_completion) end end it "completes the test" do Split.configuration.experiments[:my_experiment] = { alternatives: 
[ "control_opt", "other_opt" ], metric: :my_metric } should_finish_experiment :my_experiment ab_finished :my_metric end it "completes all relevant tests" do Split.configuration.experiments = { exp_1: { alternatives: [ "1-1", "1-2" ], metric: :my_metric }, exp_2: { alternatives: [ "2-1", "2-2" ], metric: :another_metric }, exp_3: { alternatives: [ "3-1", "3-2" ], metric: :my_metric }, } should_finish_experiment :exp_1 should_finish_experiment :exp_2, false should_finish_experiment :exp_3 ab_finished :my_metric end it "passes reset option" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, resettable: false, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end it "passes through options" do Split.configuration.experiments = { my_exp: { alternatives: ["one", "two"], metric: :my_metric, } } alternative_name = ab_test(:my_exp) exp = Split::ExperimentCatalog.find :my_exp ab_finished :my_metric, reset: false expect(ab_user[exp.key]).to eq(alternative_name) expect(ab_user[exp.finished_key]).to be_truthy end end describe "conversions" do it "should return a conversion rate for an alternative" do alternative_name = ab_test("link_color", "blue", "red") previous_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(previous_convertion_rate).to eq(0.0) ab_finished("link_color") new_convertion_rate = Split::Alternative.new(alternative_name, "link_color").conversion_rate expect(new_convertion_rate).to eq(1.0) end end describe "active experiments" do it "should show an active test" do alternative = ab_test("def", "4", "5", "6") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show a finished test" do alternative = ab_test("def", "4", 
"5", "6") ab_finished("def", { reset: false }) expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "def" expect(active_experiments.first[1]).to eq alternative end it "should show an active test when an experiment is on a later version" do experiment.reset expect(experiment.version).to eq(1) ab_test("link_color", "blue", "red") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "link_color" end it "should show versioned tests properly" do 10.times { experiment.reset } alternative = ab_test(experiment.name, "blue", "red") ab_finished(experiment.name, reset: false) expect(experiment.version).to eq(10) expect(active_experiments.count).to eq 1 expect(active_experiments).to eq({ "link_color" => alternative }) end it "should show multiple tests" do Split.configure do |config| config.allow_multiple_experiments = true end alternative = ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 2 expect(active_experiments["def"]).to eq alternative expect(active_experiments["ghi"]).to eq another_alternative end it "should not show tests with winners" do Split.configure do |config| config.allow_multiple_experiments = true end e = Split::ExperimentCatalog.find_or_create("def", "4", "5", "6") e.winner = "4" ab_test("def", "4", "5", "6") another_alternative = ab_test("ghi", "7", "8", "9") expect(active_experiments.count).to eq 1 expect(active_experiments.first[0]).to eq "ghi" expect(active_experiments.first[1]).to eq another_alternative end end describe "when user is a robot" do before(:each) do @request = OpenStruct.new(user_agent: "Googlebot/2.1 (+http://www.google.com/bot.html)") end describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not create a experiment" do ab_test("link_color", "blue", "red") 
expect(Split::Experiment.new("link_color")).to be_a_new_record end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", "link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when providing custom ignore logic" do context "using a proc to configure custom logic" do before(:each) do Split.configure do |c| c.ignore_filter = proc { |request| true } # ignore everything end end it "ignores the ab_test" do ab_test("link_color", "blue", "red") red_count = Split::Alternative.new("red", "link_color").participant_count blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((red_count + blue_count)).to be(0) end end end shared_examples_for "a disabled test" do describe "ab_test" do it "should return the control" do alternative = ab_test("link_color", "blue", "red") expect(alternative).to eq experiment.control.name end it "should not increment the participation count" do previous_red_count = Split::Alternative.new("red", "link_color").participant_count previous_blue_count = Split::Alternative.new("blue", "link_color").participant_count ab_test("link_color", "blue", "red") new_red_count = Split::Alternative.new("red", 
"link_color").participant_count new_blue_count = Split::Alternative.new("blue", "link_color").participant_count expect((new_red_count + new_blue_count)).to eq(previous_red_count + previous_blue_count) end end describe "finished" do it "should not increment the completed count" do alternative_name = ab_test("link_color", "blue", "red") previous_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count ab_finished("link_color") new_completion_count = Split::Alternative.new(alternative_name, "link_color").completed_count expect(new_completion_count).to eq(previous_completion_count) end end end describe "when ip address is ignored" do context "individually" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.130") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it_behaves_like "a disabled test" end context "for a range" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.129") Split.configure do |c| c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "using both a range and a specific value" do before(:each) do @request = OpenStruct.new(ip: "81.19.48.128") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" c.ignore_ip_addresses << /81\.19\.48\.[0-9]+/ end end it_behaves_like "a disabled test" end context "when ignored other address" do before do @request = OpenStruct.new(ip: "1.1.1.1") Split.configure do |c| c.ignore_ip_addresses << "81.19.48.130" end end it "works as usual" do alternative_name = ab_test("link_color", "red", "blue") expect { ab_finished("link_color") }.to change(Split::Alternative.new(alternative_name, "link_color"), :completed_count).by(1) end end end describe "when user is previewing" do before(:each) do @request = OpenStruct.new(headers: { "x-purpose" => "preview" }) end it_behaves_like "a disabled test" end describe "versioned experiments" do it "should use version zero if no version is present" do alternative_name = 
ab_test("link_color", "blue", "red") expect(experiment.version).to eq(0) expect(ab_user["link_color"]).to eq(alternative_name) end it "should save the version of the experiment to the session" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) end it "should load the experiment even if the version is not 0" do experiment.reset expect(experiment.version).to eq(1) alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(alternative_name) return_alternative_name = ab_test("link_color", "blue", "red") expect(return_alternative_name).to eq(alternative_name) end it "should reset the session of a user on an older version of the experiment" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color:1"]).to eq(new_alternative_name) new_alternative = Split::Alternative.new(new_alternative_name, "link_color") expect(new_alternative.participant_count).to eq(1) end it "should cleanup old versions of experiments from the session" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(1) experiment.reset expect(experiment.version).to eq(1) alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.participant_count).to eq(0) new_alternative_name = ab_test("link_color", "blue", "red") 
expect(ab_user["link_color:1"]).to eq(new_alternative_name) end it "should only count completion of users on the current version" do alternative_name = ab_test("link_color", "blue", "red") expect(ab_user["link_color"]).to eq(alternative_name) Split::Alternative.new(alternative_name, "link_color") experiment.reset expect(experiment.version).to eq(1) ab_finished("link_color") alternative = Split::Alternative.new(alternative_name, "link_color") expect(alternative.completed_count).to eq(0) end end context "when redis is not available" do before(:each) do expect(Split).to receive(:redis).at_most(5).times.and_raise(Errno::ECONNREFUSED.new) end context "and db_failover config option is turned off" do before(:each) do Split.configure do |config| config.db_failover = false end end describe "ab_test" do it "should raise an exception" do expect { ab_test("link_color", "blue", "red") }.to raise_error(Errno::ECONNREFUSED) end end describe "finished" do it "should raise an exception" do expect { ab_finished("link_color") }.to raise_error(Errno::ECONNREFUSED) end end describe "disable split testing" do before(:each) do Split.configure do |config| config.enabled = false end end it "should not attempt to connect to redis" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should return control variable" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect { ab_finished("link_color") }.not_to raise_error end end end context "and db_failover config option is turned on" do before(:each) do Split.configure do |config| config.db_failover = true end end describe "ab_test" do it "should not raise an exception" do expect { ab_test("link_color", "blue", "red") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to 
receive(:call).and_call_original ab_test("link_color", "blue", "red") end it "should always use first alternative" do expect(ab_test("link_color", "blue", "red")).to eq("blue") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("blue") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("blue") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/blue") end context "and db_failover_allow_parameter_override config option is turned on" do before(:each) do Split.configure do |config| config.db_failover_allow_parameter_override = true end end context "and given an override parameter" do it "should use given override instead of the first alternative" do @params = { "ab_test" => { "link_color" => "red" } } expect(ab_test("link_color", "blue", "red")).to eq("red") expect(ab_test("link_color", "blue", "red", "green")).to eq("red") expect(ab_test("link_color", { "blue" => 0.01 }, "red" => 0.2)).to eq("red") expect(ab_test("link_color", { "blue" => 0.8 }, { "red" => 20 })).to eq("red") expect(ab_test("link_color", "blue", "red") do |alternative| "shared/#{alternative}" end).to eq("shared/red") end end end context "and preloaded config given" do before do Split.configuration.experiments[:link_color] = { alternatives: [ "blue", "red" ], } end it "uses first alternative" do expect(ab_test(:link_color)).to eq("blue") end end end describe "finished" do it "should not raise an exception" do expect { ab_finished("link_color") }.not_to raise_error end it "should call db_failover_on_db_error proc with error as parameter" do Split.configure do |config| config.db_failover_on_db_error = proc do |error| expect(error).to be_a(Errno::ECONNREFUSED) end end expect(Split.configuration.db_failover_on_db_error).to receive(:call).and_call_original ab_finished("link_color") end end end end context "with preloaded config" do before { Split.configuration.experiments = {} } it "pulls options from config file" 
do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } ab_test :my_experiment expect(Split::Experiment.new(:my_experiment).alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(Split::Experiment.new(:my_experiment).goals).to eq([ "goal1", "goal2" ]) end it "can be called multiple times" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: ["goal1", "goal2"] } 5.times { ab_test :my_experiment } experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "other_opt" ]) expect(experiment.goals).to eq([ "goal1", "goal2" ]) expect(experiment.participant_count).to eq(1) end it "accepts multiple goals" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ], goals: [ "goal1", "goal2", "goal3" ] } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([ "goal1", "goal2", "goal3" ]) end it "allow specifying goals to be optional" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "other_opt" ] } experiment = Split::Experiment.new(:my_experiment) expect(experiment.goals).to eq([]) end it "accepts multiple alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ "control_opt", "second_opt", "third_opt" ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.map(&:name)).to eq([ "control_opt", "second_opt", "third_opt" ]) end it "accepts probability on alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 67 }, { name: "second_opt", percent: 10 }, { name: "third_opt", percent: 23 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) expect(experiment.alternatives.collect { |a| 
[a.name, a.weight] }).to eq([["control_opt", 0.67], ["second_opt", 0.1], ["third_opt", 0.23]]) end it "accepts probability on some alternatives" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt", percent: 34 }, "second_opt", { name: "third_opt", percent: 23 }, "fourth_opt", ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.34], ["second_opt", 0.215], ["third_opt", 0.23], ["fourth_opt", 0.215]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "allows name param without probability" do Split.configuration.experiments[:my_experiment] = { alternatives: [ { name: "control_opt" }, "second_opt", { name: "third_opt", percent: 64 }, ], } ab_test :my_experiment experiment = Split::Experiment.new(:my_experiment) names_and_weights = experiment.alternatives.collect { |a| [a.name, a.weight] } expect(names_and_weights).to eq([["control_opt", 0.18], ["second_opt", 0.18], ["third_opt", 0.64]]) expect(names_and_weights.inject(0) { |sum, nw| sum + nw[1] }).to eq(1.0) end it "fails gracefully if config is missing experiment" do Split.configuration.experiments = { other_experiment: { foo: "Bar" } } expect { ab_test :my_experiment }.to raise_error(Split::ExperimentNotFound) end it "fails gracefully if config is missing" do expect { Split.configuration.experiments = nil }.to raise_error(Split::InvalidExperimentsFormatError) end it "fails gracefully if config is missing alternatives" do Split.configuration.experiments[:my_experiment] = { foo: "Bar" } expect { ab_test :my_experiment }.to raise_error(NoMethodError) end end it "should handle multiple experiments correctly" do experiment2 = Split::ExperimentCatalog.find_or_create("link_color2", "blue", "red") ab_test("link_color", "blue", "red") ab_test("link_color2", "blue", "red") ab_finished("link_color2") 
experiment2.alternatives.each do |alt| expect(alt.unfinished_count).to eq(0) end end context "with goals" do before do @experiment = { "link_color" => ["purchase", "refund"] } @alternatives = ["blue", "red"] @experiment_name, @goals = normalize_metric(@experiment) @goal1 = @goals[0] @goal2 = @goals[1] end it "should normalize experiment" do expect(@experiment_name).to eq("link_color") expect(@goals).to eq(["purchase", "refund"]) end describe "ab_test" do it "should allow experiment goals interface as a single hash" do ab_test(@experiment, *@alternatives) experiment = Split::ExperimentCatalog.find("link_color") expect(experiment.goals).to eq(["purchase", "refund"]) end end describe "ab_finished" do before do @alternative_name = ab_test(@experiment, *@alternatives) end it "should increment the counter for the specified-goal completed alternative" do expect { ab_finished({ "link_color" => ["purchase"] }) } .to change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal2) }.by(0) .and change { Split::Alternative.new(@alternative_name, @experiment_name).completed_count(@goal1) }.by(1) end end end end <MSG> Allow parameter overrides, even without Redis. With this option, applications can be tested to ensure that the options render correctly, even without Redis installed and running on the development machines. 
<DFF> @@ -410,6 +410,27 @@ describe Split::Helper do "shared/#{alternative}" end.should eq('shared/blue') end + + context 'and db_failover_allow_parameter_override config option is turned on' do + before(:each) do + Split.configure do |config| + config.db_failover_allow_parameter_override = true + end + end + + context 'and given an override parameter' do + it 'should use given override instead of the first alternative' do + @params = {'link_color' => 'red'} + ab_test('link_color', 'blue', 'red').should eq('red') + ab_test('link_color', 'blue', 'red', 'green').should eq('red') + ab_test('link_color', {'blue' => 0.01}, 'red' => 0.2).should eq('red') + ab_test('link_color', {'blue' => 0.8}, {'red' => 20}).should eq('red') + ab_test('link_color', 'blue', 'red') do |alternative| + "shared/#{alternative}" + end.should eq('shared/red') + end + end + end end describe 'finished' do
21
Allow parameter overrides, even without Redis.
0
.rb
rb
mit
splitrb/split
10070842
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3) Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` ### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. 
### Sinatra ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. 
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards the results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? 
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, set the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). 
```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? 
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ``` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! 
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan Bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion form on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Merge pull request #358 from andreibondarev/fix-readme-typo Fix typo in README <DFF> @@ -36,7 +36,7 @@ gem install split ### Rails -Adding `gem 'split'` to your Gemfile will autoloaded it when rails starts up, as long as you've configured redis it will 'just work'. +Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured redis it will 'just work'. ### Sinatra
1
Merge pull request #358 from andreibondarev/fix-readme-typo
1
.md
md
mit
splitrb/split
10070843
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3). Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` ### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured redis it will 'just work'. 
### Sinatra ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. 
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? 
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). 
```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? 
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ``` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! 
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan Bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Merge pull request #358 from andreibondarev/fix-readme-typo Fix typo in README <DFF> @@ -36,7 +36,7 @@ gem install split ### Rails -Adding `gem 'split'` to your Gemfile will autoloaded it when rails starts up, as long as you've configured redis it will 'just work'. +Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured redis it will 'just work'. ### Sinatra
1
Merge pull request #358 from andreibondarev/fix-readme-typo
1
.md
md
mit
splitrb/split
10070844
<NME> README.md <BEF> # [Split](https://libraries.io/rubygems/split) [![Gem Version](https://badge.fury.io/rb/split.svg)](http://badge.fury.io/rb/split) ![Build status](https://github.com/splitrb/split/actions/workflows/ci.yml/badge.svg?branch=main) [![Code Climate](https://codeclimate.com/github/splitrb/split/badges/gpa.svg)](https://codeclimate.com/github/splitrb/split) [![Test Coverage](https://codeclimate.com/github/splitrb/split/badges/coverage.svg)](https://codeclimate.com/github/splitrb/split/coverage) [![standard-readme compliant](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) [![Open Source Helpers](https://www.codetriage.com/splitrb/split/badges/users.svg)](https://www.codetriage.com/splitrb/split) > 📈 The Rack Based A/B testing framework https://libraries.io/rubygems/split Split is a rack based A/B testing framework designed to work with Rails, Sinatra or any other rack based app. Split is heavily inspired by the [Abingo](https://github.com/ryanb/abingo) and [Vanity](https://github.com/assaf/vanity) Rails A/B testing plugins and [Resque](https://github.com/resque/resque) in its use of Redis. Split is designed to be hacker friendly, allowing for maximum customisation and extensibility. ## Install ### Requirements Split v4.0+ is currently tested with Ruby >= 2.5 and Rails >= 5.2. If your project requires compatibility with Ruby 2.4.x or older Rails versions, you can try v3.0 or v0.8.0 (for Ruby 1.9.3). Split uses Redis as a datastore. Split only supports Redis 4.0 or greater. If you're on OS X, Homebrew is the simplest way to install Redis: ```bash brew install redis redis-server /usr/local/etc/redis.conf ``` ### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured redis it will 'just work'. 
### Sinatra ``` #### Rails Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured Redis it will 'just work'. #### Sinatra To configure Sinatra with Split you need to enable sessions and mix in the helper methods. Add the following lines at the top of your Sinatra app: ```ruby require 'split' class MySinatraApp < Sinatra::Base enable :sessions helpers Split::Helper get '/' do ... end ``` ## Usage To begin your A/B test use the `ab_test` method, naming your experiment with the first argument and then the different alternatives which you wish to test on as the other arguments. `ab_test` returns one of the alternatives, if a user has already seen that test they will get the same alternative as before, which you can use to split your code on. It can be used to render different templates, show different text or any other case based logic. `ab_finished` is used to make a completion of an experiment, or conversion. Example: View ```erb <% ab_test(:login_button, "/images/button1.jpg", "/images/button2.jpg") do |button_file| %> <%= image_tag(button_file, alt: "Login!") %> <% end %> ``` Example: Controller ```ruby def register_new_user # See what level of free points maximizes users' decision to buy replacement points. @starter_points = ab_test(:new_user_free_points, '100', '200', '300') end ``` Example: Conversion tracking (in a controller!) ```ruby def buy_new_points # some business logic ab_finished(:new_user_free_points) end ``` Example: Conversion tracking (in a view) ```erb Thanks for signing up, dude! <% ab_finished(:signup_page_redesign) %> ``` You can find more examples, tutorials and guides on the [wiki](https://github.com/splitrb/split/wiki). ## Statistical Validity Split has two options for you to use to determine which alternative is the best. 
The first option (default on the dashboard) uses a z test (n>30) for the difference between your control and alternative conversion rates to calculate statistical significance. This test will tell you whether an alternative is better or worse than your control, but it will not distinguish between which alternative is the best in an experiment with multiple alternatives. Split will only tell you if your experiment is 90%, 95%, or 99% significant, and this test only works if you have more than 30 participants and 5 conversions for each branch. As per this [blog post](https://www.evanmiller.org/how-not-to-run-an-ab-test.html) on the pitfalls of A/B testing, it is highly recommended that you determine your requisite sample size for each branch before running the experiment. Otherwise, you'll have an increased rate of false positives (experiments which show a significant effect where really there is none). [Here](https://www.evanmiller.org/ab-testing/sample-size.html) is a sample size calculator for your convenience. The second option uses simulations from a beta distribution to determine the probability that the given alternative is the winner compared to all other alternatives. You can view these probabilities by clicking on the drop-down menu labeled "Confidence." This option should be used when the experiment has more than just 1 control and 1 alternative. It can also be used for a simple, 2-alternative A/B test. Calculating the beta-distribution simulations for a large number of experiments can be slow, so the results are cached. You can specify how often they should be recalculated (the default is once per day). ```ruby Split.configure do |config| config.winning_alternative_recalculation_interval = 3600 # 1 hour end ``` ## Extras ### Weighted alternatives Perhaps you only want to show an alternative to 10% of your visitors because it is very experimental or not yet fully load tested. 
To do this you can pass a weight with each alternative in the following ways: ```ruby ab_test(:homepage_design, {'Old' => 18}, {'New' => 2}) ab_test(:homepage_design, 'Old', {'New' => 1.0/9}) ab_test(:homepage_design, {'Old' => 9}, 'New') ``` This will only show the new alternative to visitors 1 in 10 times, the default weight for an alternative is 1. ### Overriding alternatives For development and testing, you may wish to force your app to always return an alternative. You can do this by passing it as a parameter in the url. If you have an experiment called `button_color` with alternatives called `red` and `blue` used on your homepage, a url such as: http://myawesomesite.com?ab_test[button_color]=red will always have red buttons. This won't be stored in your session or count towards to results, unless you set the `store_override` configuration option. In the event you want to disable all tests without having to know the individual experiment names, add a `SPLIT_DISABLE` query parameter. http://myawesomesite.com?SPLIT_DISABLE=true It is not required to send `SPLIT_DISABLE=false` to activate Split. ### Rspec Helper To aid testing with RSpec, write `spec/support/split_helper.rb` and call `use_ab_test(alternatives_by_experiment)` in your specs as instructed below: ```ruby # Create a file with these contents at 'spec/support/split_helper.rb' # and ensure it is `require`d in your rails_helper.rb or spec_helper.rb module SplitHelper # Force a specific experiment alternative to always be returned: # use_ab_test(signup_form: "single_page") # # Force alternatives for multiple experiments: # use_ab_test(signup_form: "single_page", pricing: "show_enterprise_prices") # def use_ab_test(alternatives_by_experiment) allow_any_instance_of(Split::Helper).to receive(:ab_test) do |_receiver, experiment, &block| variant = alternatives_by_experiment.fetch(experiment) { |key| raise "Unknown experiment '#{key}'" } block.call(variant) unless block.nil? 
variant end end end # Make the `use_ab_test` method available to all specs: RSpec.configure do |config| config.include SplitHelper end ``` Now you can call `use_ab_test(alternatives_by_experiment)` in your specs, for example: ```ruby it "registers using experimental signup" do use_ab_test experiment_name: "alternative_name" post "/signups" ... end ``` ### Starting experiments manually By default new A/B tests will be active right after deployment. In case you would like to start new test a while after the deploy, you can do it by setting the `start_manually` configuration option to `true`. After choosing this option tests won't be started right after deploy, but after pressing the `Start` button in Split admin dashboard. If a test is deleted from the Split dashboard, then it can only be started after pressing the `Start` button whenever being re-initialized. ### Reset after completion When a user completes a test their session is reset so that they may start the test again in the future. To stop this behaviour you can pass the following option to the `ab_finished` method: ```ruby ab_finished(:experiment_name, reset: false) ``` The user will then always see the alternative they started with. Any old unfinished experiment key will be deleted from the user's data storage if the experiment had been removed or is over and a winner had been chosen. This allows a user to enroll into any new experiment in cases when the `allow_multiple_experiments` config option is set to `false`. ### Reset experiments manually By default Split automatically resets the experiment whenever it detects the configuration for an experiment has changed (e.g. you call `ab_test` with different alternatives). You can prevent this by setting the option `reset_manually` to `true`. You may want to do this when you want to change something, like the variants' names, the metadata about an experiment, etc. without resetting everything. 
### Multiple experiments at once By default Split will avoid users participating in multiple experiments at once. This means you are less likely to skew results by adding in more variation to your tests. To stop this behaviour and allow users to participate in multiple experiments at once set the `allow_multiple_experiments` config option to true like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = true end ``` This will allow the user to participate in any number of experiments and belong to any alternative in each experiment. This has the possible downside of a variation in one experiment influencing the outcome of another. To address this, setting the `allow_multiple_experiments` config option to 'control' like so: ```ruby Split.configure do |config| config.allow_multiple_experiments = 'control' end ``` For this to work, each and every experiment you define must have an alternative named 'control'. This will allow the user to participate in multiple experiments as long as the user belongs to the alternative 'control' in each experiment. As soon as the user belongs to an alternative named something other than 'control' the user may not participate in any more experiments. Calling ab_test(<other experiments>) will always return the first alternative without adding the user to that experiment. ### Experiment Persistence Split comes with three built-in persistence adapters for storing users and the alternatives they've been given for each experiment. By default Split will store the tests for each user in the session. You can optionally configure Split to use a cookie, Redis, or any custom adapter of your choosing. #### Cookies ```ruby Split.configure do |config| config.persistence = :cookie end ``` When using the cookie persistence, Split stores data into an anonymous tracking cookie named 'split', which expires in 1 year. To change that, set the `persistence_cookie_length` in the configuration (unit of time in seconds). 
```ruby Split.configure do |config| config.persistence = :cookie config.persistence_cookie_length = 2592000 # 30 days end ``` The data stored consists of the experiment name and the variants the user is in. Example: { "experiment_name" => "variant_a" } __Note:__ Using cookies depends on `ActionDispatch::Cookies` or any identical API #### Redis Using Redis will allow ab_users to persist across sessions or machines. ```ruby Split.configure do |config| config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: -> (context) { context.current_user_id }) # Equivalent # config.persistence = Split::Persistence::RedisAdapter.with_config(lookup_by: :current_user_id) end ``` Options: * `lookup_by`: method to invoke per request for uniquely identifying ab_users (mandatory configuration) * `namespace`: separate namespace to store these persisted values (default "persistence") * `expire_seconds`: sets TTL for user key. (if a user is in multiple experiments most recent update will reset TTL for all their assignments) #### Dual Adapter The Dual Adapter allows the use of different persistence adapters for logged-in and logged-out users. A common use case is to use Redis for logged-in users and Cookies for logged-out users. ```ruby cookie_adapter = Split::Persistence::CookieAdapter redis_adapter = Split::Persistence::RedisAdapter.with_config( lookup_by: -> (context) { context.send(:current_user).try(:id) }, expire_seconds: 2592000) Split.configure do |config| config.persistence = Split::Persistence::DualAdapter.with_config( logged_in: -> (context) { !context.send(:current_user).try(:id).nil? }, logged_in_adapter: redis_adapter, logged_out_adapter: cookie_adapter) config.persistence_cookie_length = 2592000 # 30 days end ``` #### Custom Adapter Your custom adapter needs to implement the same API as existing adapters. See `Split::Persistence::CookieAdapter` or `Split::Persistence::SessionAdapter` for a starting point. 
```ruby Split.configure do |config| config.persistence = YourCustomAdapterClass end ``` ### Trial Event Hooks You can define methods that will be called at the same time as experiment alternative participation and goal completion. For example: ``` ruby Split.configure do |config| config.on_trial = :log_trial # run on every trial config.on_trial_choose = :log_trial_choose # run on trials with new users only config.on_trial_complete = :log_trial_complete end ``` Set these attributes to a method name available in the same context as the `ab_test` method. These methods should accept one argument, a `Trial` instance. ``` ruby def log_trial(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_choose(trial) logger.info "[new user] experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end def log_trial_complete(trial) logger.info "experiment=%s alternative=%s user=%s complete=true" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` #### Views If you are running `ab_test` from a view, you must define your event hook callback as a [helper_method](https://apidock.com/rails/AbstractController/Helpers/ClassMethods/helper_method) in the controller: ``` ruby helper_method :log_trial_choose def log_trial_choose(trial) logger.info "experiment=%s alternative=%s user=%s" % [ trial.experiment.name, trial.alternative, current_user.id ] end ``` ### Experiment Hooks You can assign a proc that will be called when an experiment is reset or deleted. You can use these hooks to call methods within your application to keep data related to experiments in sync with Split. 
For example: ``` ruby Split.configure do |config| # after experiment reset or deleted config.on_experiment_reset = -> (example) { # Do something on reset } config.on_experiment_delete = -> (experiment) { # Do something else on delete } # before experiment reset or deleted config.on_before_experiment_reset = -> (example) { # Do something on reset } config.on_before_experiment_delete = -> (experiment) { # Do something else on delete } # after experiment winner had been set config.on_experiment_winner_choose = -> (experiment) { # Do something on winner choose } end ``` ## Web Interface Split comes with a Sinatra-based front end to get an overview of how your experiments are doing. If you are running Rails 2: You can mount this inside your app using Rack::URLMap in your `config.ru` ```ruby require 'split/dashboard' run Rack::URLMap.new \ "/" => Your::App.new, "/split" => Split::Dashboard.new ``` However, if you are using Rails 3 or higher: You can mount this inside your app routes by first adding this to the Gemfile: ```ruby gem 'split', require: 'split/dashboard' ``` Then adding this to config/routes.rb ```ruby mount Split::Dashboard, at: 'split' ``` You may want to password protect that page, you can do so with `Rack::Auth::Basic` (in your split initializer file) ```ruby # Rails apps or apps that already depend on activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. 
# - Use digests to stop length information leaking ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & ActiveSupport::SecurityUtils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end # Apps without activesupport Split::Dashboard.use Rack::Auth::Basic do |username, password| # Protect against timing attacks: # - Use & (do not use &&) so that it doesn't short circuit. # - Use digests to stop length information leaking Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(username), ::Digest::SHA256.hexdigest(ENV["SPLIT_USERNAME"])) & Rack::Utils.secure_compare(::Digest::SHA256.hexdigest(password), ::Digest::SHA256.hexdigest(ENV["SPLIT_PASSWORD"])) end ``` You can even use Devise or any other Warden-based authentication method to authorize users. Just replace `mount Split::Dashboard, :at => 'split'` in `config/routes.rb` with the following: ```ruby match "/split" => Split::Dashboard, anchor: false, via: [:get, :post, :delete], constraints: -> (request) do request.env['warden'].authenticated? # are we authenticated? request.env['warden'].authenticate! # authenticate if not already # or even check any other condition such as request.env['warden'].user.is_admin? 
end ``` More information on this [here](https://steve.dynedge.co.uk/2011/12/09/controlling-access-to-routes-and-rack-apps-in-rails-3-with-devise-and-warden/) ### Screenshot ![split_screenshot](https://raw.githubusercontent.com/caser/caser.github.io/master/dashboard.png) ## Configuration You can override the default configuration options of Split like so: ```ruby Split.configure do |config| config.db_failover = true # handle Redis errors gracefully config.db_failover_on_db_error = -> (error) { Rails.logger.error(error.message) } config.allow_multiple_experiments = true config.enabled = true config.persistence = Split::Persistence::SessionAdapter #config.start_manually = false ## new test will have to be started manually from the admin panel. default false #config.reset_manually = false ## if true, it never resets the experiment data, even if the configuration changes config.include_rails_helper = true config.redis = "redis://custom.redis.url:6380" end ``` Split looks for the Redis host in the environment variable `REDIS_URL` then defaults to `redis://localhost:6379` if not specified by configure block. On platforms like Heroku, Split will use the value of `REDIS_PROVIDER` to determine which env variable key to use when retrieving the host config. This defaults to `REDIS_URL`. ### Filtering In most scenarios you don't want to have AB-Testing enabled for web spiders, robots or special groups of users. Split provides functionality to filter this based on a predefined, extensible list of bots, IP-lists or custom exclude logic. ```ruby Split.configure do |config| # bot config config.robot_regex = /my_custom_robot_regex/ # or config.bots['newbot'] = "Description for bot with 'newbot' user agent, which will be added to config.robot_regex for exclusion" # IP config config.ignore_ip_addresses << '81.19.48.130' # or regex: /81\.19\.48\.[0-9]+/ # or provide your own filter functionality, the default is proc{ |request| is_robot? || is_ignored_ip_address? || is_preview? 
} config.ignore_filter = -> (request) { CustomExcludeLogic.excludes?(request) } end ``` ### Experiment configuration Instead of providing the experiment options inline, you can store them in a hash. This hash can control your experiment's alternatives, weights, algorithm and if the experiment resets once finished: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], resettable: false }, :my_second_experiment => { algorithm: 'Split::Algorithms::Whiplash', alternatives: [ { name: "a", percent: 67 }, { name: "b", percent: 33 } ] } } end ``` You can also store your experiments in a YAML file: ```ruby Split.configure do |config| config.experiments = YAML.load_file "config/experiments.yml" end ``` You can then define the YAML file like: ```yaml my_first_experiment: alternatives: - a - b my_second_experiment: alternatives: - name: a percent: 67 - name: b percent: 33 resettable: false ``` This simplifies the calls from your code: ```ruby ab_test(:my_first_experiment) ``` and: ```ruby ab_finished(:my_first_experiment) ``` You can also add meta data for each experiment, which is very useful when you need more than an alternative name to change behaviour: ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metadata: { "a" => {"text" => "Have a fantastic day"}, "b" => {"text" => "Don't get hit by a bus"} } } } end ``` ```yaml my_first_experiment: alternatives: - a - b metadata: a: text: "Have a fantastic day" b: text: "Don't get hit by a bus" ``` This allows for some advanced experiment configuration using methods like: ```ruby trial.alternative.name # => "a" trial.metadata['text'] # => "Have a fantastic day" ``` or in views: ```erb <% ab_test("my_first_experiment") do |alternative, meta| %> <%= alternative %> <small><%= meta['text'] %></small> <% end %> ``` The keys used in meta data should be Strings #### Metrics You might wish to track generic metrics, such as 
conversions, and use those to complete multiple different experiments without adding more to your code. You can use the configuration hash to do this, thanks to the `:metric` option. ```ruby Split.configure do |config| config.experiments = { my_first_experiment: { alternatives: ["a", "b"], metric: :my_metric } } end ``` Your code may then track a completion using the metric instead of the experiment name: ```ruby ab_finished(:my_metric) ``` You can also create a new metric by instantiating and saving a new Metric object. ```ruby Split::Metric.new(:my_metric) Split::Metric.save ``` #### Goals You might wish to allow an experiment to have multiple, distinguishable goals. The API to define goals for an experiment is this: ```ruby ab_test({link_color: ["purchase", "refund"]}, "red", "blue") ``` or you can define them in a configuration file: ```ruby Split.configure do |config| config.experiments = { link_color: { alternatives: ["red", "blue"], goals: ["purchase", "refund"] } } end ``` To complete a goal conversion, you do it like: ```ruby ab_finished(link_color: "purchase") ``` Note that if you pass additional options, that should be a separate hash: ```ruby ab_finished({ link_color: "purchase" }, reset: false) ``` **NOTE:** This does not mean that a single experiment can complete more than one goal. Once you finish one of the goals, the test is considered to be completed, and finishing the other goal will no longer register. (Assuming the test runs with `reset: false`.) **Good Example**: Test if listing Plan A first result in more conversions to Plan A (goal: "plana_conversion") or Plan B (goal: "planb_conversion"). **Bad Example**: Test if button color increases conversion rate through multiple steps of a funnel. THIS WILL NOT WORK. **Bad Example**: Test both how button color affects signup *and* how it affects login, at the same time. THIS WILL NOT WORK. 
#### Combined Experiments If you want to test how button color affects signup *and* how it affects login at the same time, use combined experiments. Configure like so: ```ruby Split.configuration.experiments = { :button_color_experiment => { :alternatives => ["blue", "green"], :combined_experiments => ["button_color_on_signup", "button_color_on_login"] } } ``` Starting the combined test starts all combined experiments ```ruby ab_combined_test(:button_color_experiment) ``` Finish each combined test as normal ```ruby ab_finished(:button_color_on_login) ab_finished(:button_color_on_signup) ``` **Additional Configuration**: * Be sure to enable `allow_multiple_experiments` * In Sinatra include the CombinedExperimentsHelper ``` helpers Split::CombinedExperimentsHelper ``` ### DB failover solution Due to the fact that Redis has no automatic failover mechanism, it's possible to switch on the `db_failover` config option, so that `ab_test` and `ab_finished` will not crash in case of a db failure. `ab_test` always delivers alternative A (the first one) in that case. It's also possible to set a `db_failover_on_db_error` callback (proc) for example to log these errors via Rails.logger. ### Redis You may want to change the Redis host and port Split connects to, or set various other options at startup. Split has a `redis` setter which can be given a string or a Redis object. This means if you're already using Redis in your app, Split can re-use the existing connection. String: `Split.redis = 'redis://localhost:6379'` Redis: `Split.redis = $redis` For our rails app we have a `config/initializers/split.rb` file where we load `config/split.yml` by hand and set the Redis information appropriately. 
Here's our `config/split.yml`: ```yml development: redis://localhost:6379 test: redis://localhost:6379 staging: redis://redis1.example.com:6379 fi: redis://localhost:6379 production: redis://redis1.example.com:6379 ``` And our initializer: ```ruby split_config = YAML.load_file(Rails.root.join('config', 'split.yml')) Split.redis = split_config[Rails.env] ``` ### Redis Caching (v4.0+) In some high-volume usage scenarios, Redis load can be incurred by repeated fetches for fairly static data. Enabling caching will reduce this load. ```ruby Split.configuration.cache = true ``` This currently caches: - `Split::ExperimentCatalog.find` - `Split::Experiment.start_time` - `Split::Experiment.winner` ## Namespaces If you're running multiple, separate instances of Split you may want to namespace the keyspaces so they do not overlap. This is not unlike the approach taken by many memcached clients. This feature can be provided by the [redis-namespace](https://github.com/defunkt/redis-namespace) library. To configure Split to use `Redis::Namespace`, do the following: 1. Add `redis-namespace` to your Gemfile: ```ruby gem 'redis-namespace' ``` 2. Configure `Split.redis` to use a `Redis::Namespace` instance (possible in an initializer): ```ruby redis = Redis.new(url: ENV['REDIS_URL']) # or whatever config you want Split.redis = Redis::Namespace.new(:your_namespace, redis: redis) ``` ## Outside of a Web Session Split provides the Helper module to facilitate running experiments inside web sessions. Alternatively, you can access the underlying Metric, Trial, Experiment and Alternative objects to conduct experiments that are not tied to a web session. ```ruby # create a new experiment experiment = Split::ExperimentCatalog.find_or_create('color', 'red', 'blue') # create a new trial trial = Split::Trial.new(:experiment => experiment) # run trial trial.choose! 
# get the result, returns either red or blue trial.alternative.name # if the goal has been achieved, increment the successful completions for this alternative. if goal_achieved? trial.complete! end ``` ## Algorithms By default, Split ships with `Split::Algorithms::WeightedSample` that randomly selects from possible alternatives for a traditional a/b test. It is possible to specify static weights to favor certain alternatives. `Split::Algorithms::Whiplash` is an implementation of a [multi-armed bandit algorithm](http://stevehanov.ca/blog/index.php?id=132). This algorithm will automatically weight the alternatives based on their relative performance, choosing the better-performing ones more often as trials are completed. `Split::Algorithms::BlockRandomization` is an algorithm that ensures equal participation across all alternatives. This algorithm will choose the alternative with the fewest participants. In the event of multiple minimum participant alternatives (i.e. starting a new "Block") the algorithm will choose a random alternative from those minimum participant alternatives. Users may also write their own algorithms. The default algorithm may be specified globally in the configuration file, or on a per experiment basis using the experiments hash of the configuration file. To change the algorithm globally for all experiments, use the following in your initializer: ```ruby Split.configure do |config| config.algorithm = Split::Algorithms::Whiplash end ``` ## Extensions - [Split::Export](https://github.com/splitrb/split-export) - Easily export A/B test data out of Split. - [Split::Analytics](https://github.com/splitrb/split-analytics) - Push test data to Google Analytics. - [Split::Mongoid](https://github.com/MongoHQ/split-mongoid) - Store experiment data in mongoid (still uses redis). - [Split::Cacheable](https://github.com/harrystech/split_cacheable) - Automatically create cache buckets per test. 
- [Split::Counters](https://github.com/bernardkroes/split-counters) - Add counters per experiment and alternative. - [Split::Cli](https://github.com/craigmcnamara/split-cli) - A CLI to trigger Split A/B tests. ## Screencast Ryan bates has produced an excellent 10 minute screencast about split on the Railscasts site: [A/B Testing with Split](http://railscasts.com/episodes/331-a-b-testing-with-split) ## Blogposts * [Recipe: A/B testing with KISSMetrics and the split gem](https://robots.thoughtbot.com/post/9595887299/recipe-a-b-testing-with-kissmetrics-and-the-split-gem) * [Rails A/B testing with Split on Heroku](http://blog.nathanhumbert.com/2012/02/rails-ab-testing-with-split-on-heroku.html) ## Backers Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/split#backer)] <a href="https://opencollective.com/split/backer/0/website" target="_blank"><img src="https://opencollective.com/split/backer/0/avatar.svg"></a> <a href="https://opencollective.com/split/backer/1/website" target="_blank"><img src="https://opencollective.com/split/backer/1/avatar.svg"></a> <a href="https://opencollective.com/split/backer/2/website" target="_blank"><img src="https://opencollective.com/split/backer/2/avatar.svg"></a> <a href="https://opencollective.com/split/backer/3/website" target="_blank"><img src="https://opencollective.com/split/backer/3/avatar.svg"></a> <a href="https://opencollective.com/split/backer/4/website" target="_blank"><img src="https://opencollective.com/split/backer/4/avatar.svg"></a> <a href="https://opencollective.com/split/backer/5/website" target="_blank"><img src="https://opencollective.com/split/backer/5/avatar.svg"></a> <a href="https://opencollective.com/split/backer/6/website" target="_blank"><img src="https://opencollective.com/split/backer/6/avatar.svg"></a> <a href="https://opencollective.com/split/backer/7/website" target="_blank"><img 
src="https://opencollective.com/split/backer/7/avatar.svg"></a> <a href="https://opencollective.com/split/backer/8/website" target="_blank"><img src="https://opencollective.com/split/backer/8/avatar.svg"></a> <a href="https://opencollective.com/split/backer/9/website" target="_blank"><img src="https://opencollective.com/split/backer/9/avatar.svg"></a> <a href="https://opencollective.com/split/backer/10/website" target="_blank"><img src="https://opencollective.com/split/backer/10/avatar.svg"></a> <a href="https://opencollective.com/split/backer/11/website" target="_blank"><img src="https://opencollective.com/split/backer/11/avatar.svg"></a> <a href="https://opencollective.com/split/backer/12/website" target="_blank"><img src="https://opencollective.com/split/backer/12/avatar.svg"></a> <a href="https://opencollective.com/split/backer/13/website" target="_blank"><img src="https://opencollective.com/split/backer/13/avatar.svg"></a> <a href="https://opencollective.com/split/backer/14/website" target="_blank"><img src="https://opencollective.com/split/backer/14/avatar.svg"></a> <a href="https://opencollective.com/split/backer/15/website" target="_blank"><img src="https://opencollective.com/split/backer/15/avatar.svg"></a> <a href="https://opencollective.com/split/backer/16/website" target="_blank"><img src="https://opencollective.com/split/backer/16/avatar.svg"></a> <a href="https://opencollective.com/split/backer/17/website" target="_blank"><img src="https://opencollective.com/split/backer/17/avatar.svg"></a> <a href="https://opencollective.com/split/backer/18/website" target="_blank"><img src="https://opencollective.com/split/backer/18/avatar.svg"></a> <a href="https://opencollective.com/split/backer/19/website" target="_blank"><img src="https://opencollective.com/split/backer/19/avatar.svg"></a> <a href="https://opencollective.com/split/backer/20/website" target="_blank"><img src="https://opencollective.com/split/backer/20/avatar.svg"></a> <a 
href="https://opencollective.com/split/backer/21/website" target="_blank"><img src="https://opencollective.com/split/backer/21/avatar.svg"></a> <a href="https://opencollective.com/split/backer/22/website" target="_blank"><img src="https://opencollective.com/split/backer/22/avatar.svg"></a> <a href="https://opencollective.com/split/backer/23/website" target="_blank"><img src="https://opencollective.com/split/backer/23/avatar.svg"></a> <a href="https://opencollective.com/split/backer/24/website" target="_blank"><img src="https://opencollective.com/split/backer/24/avatar.svg"></a> <a href="https://opencollective.com/split/backer/25/website" target="_blank"><img src="https://opencollective.com/split/backer/25/avatar.svg"></a> <a href="https://opencollective.com/split/backer/26/website" target="_blank"><img src="https://opencollective.com/split/backer/26/avatar.svg"></a> <a href="https://opencollective.com/split/backer/27/website" target="_blank"><img src="https://opencollective.com/split/backer/27/avatar.svg"></a> <a href="https://opencollective.com/split/backer/28/website" target="_blank"><img src="https://opencollective.com/split/backer/28/avatar.svg"></a> <a href="https://opencollective.com/split/backer/29/website" target="_blank"><img src="https://opencollective.com/split/backer/29/avatar.svg"></a> ## Sponsors Become a sponsor and get your logo on our README on Github with a link to your site. 
[[Become a sponsor](https://opencollective.com/split#sponsor)] <a href="https://opencollective.com/split/sponsor/0/website" target="_blank"><img src="https://opencollective.com/split/sponsor/0/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/1/website" target="_blank"><img src="https://opencollective.com/split/sponsor/1/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/2/website" target="_blank"><img src="https://opencollective.com/split/sponsor/2/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/3/website" target="_blank"><img src="https://opencollective.com/split/sponsor/3/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/4/website" target="_blank"><img src="https://opencollective.com/split/sponsor/4/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/5/website" target="_blank"><img src="https://opencollective.com/split/sponsor/5/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/6/website" target="_blank"><img src="https://opencollective.com/split/sponsor/6/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/7/website" target="_blank"><img src="https://opencollective.com/split/sponsor/7/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/8/website" target="_blank"><img src="https://opencollective.com/split/sponsor/8/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/9/website" target="_blank"><img src="https://opencollective.com/split/sponsor/9/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/10/website" target="_blank"><img src="https://opencollective.com/split/sponsor/10/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/11/website" target="_blank"><img src="https://opencollective.com/split/sponsor/11/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/12/website" target="_blank"><img src="https://opencollective.com/split/sponsor/12/avatar.svg"></a> <a 
href="https://opencollective.com/split/sponsor/13/website" target="_blank"><img src="https://opencollective.com/split/sponsor/13/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/14/website" target="_blank"><img src="https://opencollective.com/split/sponsor/14/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/15/website" target="_blank"><img src="https://opencollective.com/split/sponsor/15/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/16/website" target="_blank"><img src="https://opencollective.com/split/sponsor/16/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/17/website" target="_blank"><img src="https://opencollective.com/split/sponsor/17/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/18/website" target="_blank"><img src="https://opencollective.com/split/sponsor/18/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/19/website" target="_blank"><img src="https://opencollective.com/split/sponsor/19/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/20/website" target="_blank"><img src="https://opencollective.com/split/sponsor/20/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/21/website" target="_blank"><img src="https://opencollective.com/split/sponsor/21/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/22/website" target="_blank"><img src="https://opencollective.com/split/sponsor/22/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/23/website" target="_blank"><img src="https://opencollective.com/split/sponsor/23/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/24/website" target="_blank"><img src="https://opencollective.com/split/sponsor/24/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/25/website" target="_blank"><img src="https://opencollective.com/split/sponsor/25/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/26/website" 
target="_blank"><img src="https://opencollective.com/split/sponsor/26/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/27/website" target="_blank"><img src="https://opencollective.com/split/sponsor/27/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/28/website" target="_blank"><img src="https://opencollective.com/split/sponsor/28/avatar.svg"></a> <a href="https://opencollective.com/split/sponsor/29/website" target="_blank"><img src="https://opencollective.com/split/sponsor/29/avatar.svg"></a> ## Contribute Please do! Over 70 different people have contributed to the project, you can see them all here: https://github.com/splitrb/split/graphs/contributors. ### Development The source code is hosted at [GitHub](https://github.com/splitrb/split). Report issues and feature requests on [GitHub Issues](https://github.com/splitrb/split/issues). You can find a discussion forum on [Google Groups](https://groups.google.com/d/forum/split-ruby). ### Tests Run the tests like this: # Start a Redis server in another tab. redis-server bundle rake spec ### A Note on Patches and Pull Requests * Fork the project. * Make your feature addition or bug fix. * Add tests for it. This is important so I don't break it in a future version unintentionally. * Add documentation if necessary. * Commit. Do not mess with the rakefile, version, or history. (If you want to have your own version, that is fine. But bump the version in a commit by itself, which I can ignore when I pull.) * Send a pull request. Bonus points for topic branches. ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms. ## Copyright [MIT License](LICENSE) © 2019 [Andrew Nesbitt](https://github.com/andrew). 
<MSG> Merge pull request #358 from andreibondarev/fix-readme-typo Fix typo in README <DFF> @@ -36,7 +36,7 @@ gem install split ### Rails -Adding `gem 'split'` to your Gemfile will autoloaded it when rails starts up, as long as you've configured redis it will 'just work'. +Adding `gem 'split'` to your Gemfile will autoload it when rails starts up, as long as you've configured redis it will 'just work'. ### Sinatra
1
Merge pull request #358 from andreibondarev/fix-readme-typo
1
.md
md
mit
splitrb/split
10070845
<NME> safemarkup.py <BEF> ADDFILE <MSG> Catch restructuredtext exceptions, just show as text instead. <DFF> @@ -0,0 +1,28 @@ +from django import template +from django.conf import settings +from django.utils.encoding import smart_str, force_unicode +from django.utils.safestring import mark_safe + +register = template.Library() + + +def saferst(value): + try: + from docutils.core import publish_parts + except ImportError: + return force_unicode(value) + + docutils_setttings = getattr(settings, "RESTRUCTUREDTEXT_FILTER_SETTINGS", + dict()) + + try: + parts = publish_parts(source=smart_str(value), + writer_name="html4css1", + settings_overrides=docutils_settings) + except: + return foce_unicode(value) + else: + return mark_safe(force_unicode(parts["fragment"])) +saferst.is_safe = True +register.filter(saferst) +
28
Catch restructuredtext exceptions, just show as text instead.
0
.py
py
bsd-3-clause
ask/chishop
10070846
<NME> layout.erb <BEF> <!DOCTYPE html> <html> <head> <meta content='text/html; charset=utf-8' http-equiv='Content-Type'> <link href="<%= url 'reset.css' %>" media="screen" rel="stylesheet" type="text/css"> <link href="<%= url 'style.css' %>" media="screen" rel="stylesheet" type="text/css"> <script type="text/javascript" src='<%= url 'dashboard.js' %>'></script> <script type="text/javascript" src='<%= url 'jquery-1.11.1.min.js' %>'></script> <script type="text/javascript" src='<%= url 'dashboard-filtering.js' %>'></script> <title>Split</title> </head> <body> <div class="header"> <h1>Split Dashboard</h1> <p class="environment"><%= @current_env %></p> </div> <div id="main"> <%= yield %> </div> <div id="footer"> <p>Powered by <a href="http://github.com/splitrb/split">Split</a> v<%=Split::VERSION %></p> </div> </body> </html> <MSG> Merge pull request #584 from giraffate/fix_URLs_to_replace_http_with_https Fix URLs to replace http with https <DFF> @@ -21,7 +21,7 @@ </div> <div id="footer"> - <p>Powered by <a href="http://github.com/splitrb/split">Split</a> v<%=Split::VERSION %></p> + <p>Powered by <a href="https://github.com/splitrb/split">Split</a> v<%=Split::VERSION %></p> </div> </body> </html>
1
Merge pull request #584 from giraffate/fix_URLs_to_replace_http_with_https
1
.erb
erb
mit
splitrb/split
10070847
<NME> layout.erb <BEF> <!DOCTYPE html> <html> <head> <meta content='text/html; charset=utf-8' http-equiv='Content-Type'> <link href="<%= url 'reset.css' %>" media="screen" rel="stylesheet" type="text/css"> <link href="<%= url 'style.css' %>" media="screen" rel="stylesheet" type="text/css"> <script type="text/javascript" src='<%= url 'dashboard.js' %>'></script> <script type="text/javascript" src='<%= url 'jquery-1.11.1.min.js' %>'></script> <script type="text/javascript" src='<%= url 'dashboard-filtering.js' %>'></script> <title>Split</title> </head> <body> <div class="header"> <h1>Split Dashboard</h1> <p class="environment"><%= @current_env %></p> </div> <div id="main"> <%= yield %> </div> <div id="footer"> <p>Powered by <a href="http://github.com/splitrb/split">Split</a> v<%=Split::VERSION %></p> </div> </body> </html> <MSG> Merge pull request #584 from giraffate/fix_URLs_to_replace_http_with_https Fix URLs to replace http with https <DFF> @@ -21,7 +21,7 @@ </div> <div id="footer"> - <p>Powered by <a href="http://github.com/splitrb/split">Split</a> v<%=Split::VERSION %></p> + <p>Powered by <a href="https://github.com/splitrb/split">Split</a> v<%=Split::VERSION %></p> </div> </body> </html>
1
Merge pull request #584 from giraffate/fix_URLs_to_replace_http_with_https
1
.erb
erb
mit
splitrb/split
10070848
<NME> layout.erb <BEF> <!DOCTYPE html> <html> <head> <meta content='text/html; charset=utf-8' http-equiv='Content-Type'> <link href="<%= url 'reset.css' %>" media="screen" rel="stylesheet" type="text/css"> <link href="<%= url 'style.css' %>" media="screen" rel="stylesheet" type="text/css"> <script type="text/javascript" src='<%= url 'dashboard.js' %>'></script> <script type="text/javascript" src='<%= url 'jquery-1.11.1.min.js' %>'></script> <script type="text/javascript" src='<%= url 'dashboard-filtering.js' %>'></script> <title>Split</title> </head> <body> <div class="header"> <h1>Split Dashboard</h1> <p class="environment"><%= @current_env %></p> </div> <div id="main"> <%= yield %> </div> <div id="footer"> <p>Powered by <a href="http://github.com/splitrb/split">Split</a> v<%=Split::VERSION %></p> </div> </body> </html> <MSG> Merge pull request #584 from giraffate/fix_URLs_to_replace_http_with_https Fix URLs to replace http with https <DFF> @@ -21,7 +21,7 @@ </div> <div id="footer"> - <p>Powered by <a href="http://github.com/splitrb/split">Split</a> v<%=Split::VERSION %></p> + <p>Powered by <a href="https://github.com/splitrb/split">Split</a> v<%=Split::VERSION %></p> </div> </body> </html>
1
Merge pull request #584 from giraffate/fix_URLs_to_replace_http_with_https
1
.erb
erb
mit
splitrb/split
10070849
<NME> redis_adapter.rb <BEF> # frozen_string_literal: true module Split module Persistence class RedisAdapter DEFAULT_CONFIG = { namespace: "persistence" }.freeze attr_reader :redis_key def initialize(context, key = nil) if key @redis_key = "#{self.class.config[:namespace]}:#{key}" elsif lookup_by = self.class.config[:lookup_by] if lookup_by.respond_to?(:call) key_frag = lookup_by.call(context) else key_frag = context.send(lookup_by) end @redis_key = "#{self.class.config[:namespace]}:#{key_frag}" else raise "Please configure lookup_by" end end def [](field) Split.redis.hget(redis_key, field) end def []=(field, value) Split.redis.hset(redis_key, field, value) expire_seconds = self.class.config[:expire_seconds] Split.redis.expire(redis_key, expire_seconds) if expire_seconds end def delete(field) Split.redis.hdel(redis_key, field) end def keys Split.redis.hkeys(redis_key) end def self.find(user_id) new(nil, user_id) end def self.with_config(options={}) self.config.merge!(options) self end def self.config @config ||= DEFAULT_CONFIG.dup end def self.reset_config! @config = DEFAULT_CONFIG.dup end end end end <MSG> Fix Layout/SpaceAroundEqualsInParameterDefault <DFF> @@ -44,7 +44,7 @@ module Split new(nil, user_id) end - def self.with_config(options={}) + def self.with_config(options = {}) self.config.merge!(options) self end
1
Fix Layout/SpaceAroundEqualsInParameterDefault
1
.rb
rb
mit
splitrb/split