That's pretty strange, works for me just fine:

```
[HTML source of the rendered "DVC Plot" page; the pasted markup was mangled into "--" and "|" artifacts]
```
@jorgeorpinel did you use standard templates?

> ```
> [mangled HTML paste]
> ```

Tehehe, copy-paste from Chrome "view source" introduces a bunch of characters (it works properly with ctrl+shift+v for me).

> did you use standard templates?

🤦 ! I mistakenly used the `-t train.json` flag thinking it meant `--targets`, but it means `--template`! `dvc plots diff --targets train.json` works.

Maybe we should ~~reconsider the option letters? Or at least~~ print a warning when a provided template file does not exist or is invalid! Cc @pared Thanks!

@jorgeorpinel It already throws if the template is not found:

```
(dvc-3.8.3) ➜ dvc git:(fix-3897) ✗ dvc plots diff -t sdf
ERROR: Template 'sdf' not found.

Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help!
```

It is not able to detect a bad template, that is true, but I'm not completely sure we can do that in the general case. It seems the best we could do is check that it is indeed JSON/YAML (once we get rid of HTML support), but I'm not sure we could require it to include any particular anchors. CC @pared

> not able to detect a bad template, that is true, but I'm not completely sure if we can do that in a general case

Maybe let's just keep an eye on this to see if users make the same error I did, and at least detect when a given template is actually a metrics file in that case (and WARN/ERROR out).

We can check that a template is valid by checking for the `DVC_METRIC_DATA` anchor. I don't see a use case where one would want to use `dvc plots` and not provide it.

Also, if we are talking about JSON templates, a common property of a JSON specification is the `$schema` field, e.g.
- for Vega: `"$schema": "https://vega.github.io/schema/vega/v5.json"`
- for Vega-Lite: `"$schema": "https://vega.github.io/schema/vega-lite/v4.json"`

It could even be done by [JSON schema validation](https://datatracker.ietf.org/doc/draft-handrews-json-schema-validation/), but that seems overkill (unless there's a quick Python lib that does it easily).

Taking a quick peek at available resources, https://github.com/Julian/jsonschema seems like the most popular Python-based and maintained package.

The question is whether we want to include a new package for a single validation feature.

If we do, it might help us solve `plot update` (if we decide to do it) from https://github.com/iterative/dvc/issues/3906

I think it would make sense.

@pared So it looks like our templates have the schema already, which validates the generated template, right? So we are covered there. The only thing that we need to validate is that the template is indeed using our anchors.

@efiop yep, I think this would solve this issue.
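For illustration, a minimal sketch of the anchor check discussed above (the function name and the exact anchor spelling are assumptions, not DVC's actual implementation):

```python
import json

DVC_METRIC_DATA = "<DVC_METRIC_DATA>"  # assumed anchor spelling

def check_template(path):
    """Reject a template that is not JSON or lacks the data anchor."""
    with open(path, encoding="utf-8") as f:
        content = f.read()
    try:
        json.loads(content)  # must at least be well-formed JSON
    except ValueError as exc:
        raise ValueError(f"'{path}' is not a valid JSON template") from exc
    if DVC_METRIC_DATA not in content:
        # Likely a metrics file passed by mistake (-t instead of --targets).
        raise ValueError(f"'{path}' does not contain the {DVC_METRIC_DATA} anchor")
```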
---

Hi @mhham!

Thanks for reporting this issue! The original intention was to just not make it fatal if some download fails, but that approach is clearly faulty. We need to download everything we can and then error out, so `dvc pull` doesn't report "success".

- [ ] refactor download/upload methods to unify them
- [ ] raise an error if download/upload failed, but download/upload what we can

---

CC: @pmrowla

https://github.com/iterative/dvc/blob/13c1314099df131f526177b2a75bda02dfc0cdbf/dvc/repo/experiments/pull.py#L39-L40

this should be returning `ExpRefInfo.from_ref(exp_name)` instead of the string `exp_name`

---

For the record: the `--run-cache` flag is just a temporary solution and is not meant to accept targets. We will likely just support `dvc pull target`, which will then somehow look into the run-cache by itself and pull by itself, or suggest a hint on how to do it. run-cache is really an implementation detail that should be hidden away. We need to take a closer look at this.

Oh, ok. Is there any kind of estimate on this being prioritized?

As described in the forum, I was planning to use `--run-cache` as a workaround for deploying models, but the lack of target selection makes it impossible.

Hi @maricatovictor! Sorry for the delay. We've added this task to the next sprint (starting next week).

---

Thanks for reporting; if there's an easy fix, happy to accept a PR for 1.10.3.

Otherwise, let's make sure we get this right in v2. Tbh, there are lots of types that currently break JsonSchema.

---

> "exited with 2" Is it useful information? What does the exit code 2 belong to? It might confuse users. Error codes can be in debug output, not here.

I disagree with this: exit codes are important, well, that's how you know whether it succeeded or failed (or crashed).

> Do we really need [Errno 2]? In what cases does the code help users? This makes the message longer and breaks human readability.
> $ dvc exp run
> ERROR: failed to reproduce 'dvc.yaml': [Errno 2] No such file or directory: '/Users/dmitry/src/test/myproj/data'

This is unrelated to `dvc exp init` and, to be honest, out of scope for this. This is part of a larger issue with DVC that happens in all of the commands. When I brought this up a year ago for https://github.com/iterative/dvc/issues/5387, there were disagreements on how to handle these errors (cc @efiop).

From #7130:

> `dvc exp init` is not fully supported without interactive mode.
> It looks like today the only supported flow is: interactive. The default `dvc init` has multiple assumptions about the project structure and it is not clear how a user should know about the assumptions.

Fair point @dmpetrov. Making the output consistent between them can make non-interactive mode more like `dvc exp init -i`.

```
'src' does not exist in the workspace. "exp run" may fail.
'data' does not exist in the workspace. "exp run" may fail.
```

I'm proposing creating these by default (plus `params.yaml`). Users need a single simple command to wrap their `python train.py` without manually creating all the structures.

There is a rule on https://clig.dev/#output: `Display output on success, but keep it brief.` It feels like these are not needed in the output if we follow the rule.

Two issues are here:

1. Do we need the content of dvc.yaml in the output?

```
train:
  cmd: python train.py
  deps:
  - data
```

2. Do we need links in the output?
(Especially as a single line)

```
See https://s.dvc.org/g/pipeline-files.
...
See https://s.dvc.org/g/exp/run.
```

> There is a rule on https://clig.dev/#output - `Display output on success, but keep it brief.` It feels like these are not needed in the output if we follow the rule.

I think @skshetry would agree, which explains why there is so little output in non-interactive mode now. I suggested more verbose output since this command is tailored to users with no prior dvc knowledge.

> 1. Do we need the content of dvc.yaml in the output?

No. It can be an intro to dvc stage syntax, but it's not necessary. We also don't need to show it or prompt for confirmation in `dvc exp init -i`.

> Do we need links in the output? (Especially as a single line)

No. Again, I/we opted for more verbose output to guide users more. It's not output that is/needs to be machine readable, and I thought usability should trump style in this case, but obviously it can be removed.

---

Given those suggestions, the basic output can be:

```
$ dvc exp init python train.py
DVC assumes the following workspace structure:
├── data
├── metrics.json
├── models
├── params.yaml
├── plots
└── src

Created train stage in dvc.yaml. To run, use "dvc exp run".
```

Should we also make interactive mode more concise?

```
$ dvc exp init -i python train.py
DVC assumes the following workspace structure:
├── data
├── metrics.json
├── models
├── params.yaml
├── plots
└── src

Path to a code file/directory [src, n to omit]:
Path to a data file/directory [data, n to omit]:
Path to a model file/directory [models, n to omit]:
Path to a parameters file [params.yaml, n to omit]:
Path to a metrics file [metrics.json, n to omit]:
Path to a plots file/directory [plots, n to omit]:

Created train stage in dvc.yaml. To run, use "dvc exp run".
```

One thought on this: having a quasi-unix style makes sense for dvc. In this case, I'm not so sure, since the intended audience is less likely to be people familiar with that style.

---

For experiments this is expected and intentional; exp names are not allowed to contain slashes (even though they are valid in git refs).

see: https://github.com/iterative/dvc/pull/6848#issuecomment-950487613

e: reopened since for branches/tags we should still display them with the slash

@mattseddon can we do a workaround for now ... pass the hash id instead into `dvc plots diff`?

I have been looking at how to make that hack. I would need to create a mapping between the name and the sha, but the fix is not as simple as you would initially think. The returned plots data will have the hash id all through it as the rev field. We will have to replace all instances of that hash id in the data in order to show the semi-correct name in the UI / the correct plots data.

> The returned plots data will have the hash id all through it as the rev field

Yes, and it might be fine for now as a workaround for names that we know are problematic, wdyt?
Image plots will be fine, trends also (?); only plots like confusion matrices will have these hash ids?

> e: reopened since for branches/tags we should still display them with the slash

What's the level of effort to support it?

> For experiments this is expected and intentional, exp names are not allowed to contain slashes (even though they are valid in git refs)
>
> see: [#6848 (comment)](https://github.com/iterative/dvc/pull/6848#issuecomment-950487613)

The comment suggests hesitance to support slashes because the exp ref conventions aren't stable enough. Do you still have those concerns @pmrowla?

This is just a UI issue; it is separate from the "should exp names be allowed to contain slashes" question. The problem here is that if you have a git branch named `foo/bar`, we display it as `bar` due to something parsing git branches by splitting at the last path separator.

Exp names should still be prohibited from having slashes. But git branches and tags are allowed to have slashes, and we need to display them properly in DVC commands.

If it's a small effort, could we please address this?

> If it's a small effort, could we please address this?

I will open a P.R. later

---

perhaps creating & using `logging.TRACE = 5` is better than this env var?

@casperdcl could you explain a little bit more?
I'm not sure where the user should set the `logging.TRACE = 5`. Is it through the config file?

cmd|level
--:|:--
default|INFO
`-v`|DEBUG
`-vv`|TRACE

Ohh got it :+1: yep, that's what I proposed first on the linked issue:

> My suggestion would be to create different verbose levels, -vvv (INFO, DEBUG, DATABASE?)
> ...
> Modify the -v so you can express the level of verbosity with several ones -vvv
> https://github.com/iterative/dvc/issues/2329#issue-473668442

I like this idea better than using the env var.

though really I'd prefer `--log TRACE|DEBUG|INFO|WARN(ING)|ERROR|FATAL`, and for backward compatibility `-v` as a shortcut for `--log DEBUG`

@casperdcl is this more common? I'm more used to the v's :sweat_smile:

Even the `argparse` documentation has a section on it:
```python
>>> parser = argparse.ArgumentParser()
>>> parser.add_argument('--verbose', '-v', action='count')
>>> parser.parse_args(['-vvv'])
Namespace(verbose=3)
```

the problem with counting `-v` is there's no way to access `WARN` and `ERROR`. We could count `-q` (quiet) or similar for that.

@casperdcl so, it would start on `INFO` by default; if you want just warnings then use one `-q`, if you want to silence everything `-qq`, and one `-v` will be for `DEBUG` and two for `TRACE`, right?

:thinking: let's wait for the opinion of @shcheklein, @efiop, @Suor, @pared (need some quorum on this one, can't decide by myself)

I am ok with counting `v/q`s, this is more friendly for typing. `--log` might be useful for scripting (more obvious when you read it), though I'm not sure how common scripting dvc is.

So my preference: implement `v/q`s, and keep `--log` up our sleeves for now.

Awesome, @Suor, I'll add those implementation details to the description.
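A minimal sketch of the counted `-v`/`-q` scheme agreed on above, assuming a custom TRACE level registered at 5 (not DVC's actual implementation):

```python
import argparse
import logging

TRACE = 5
logging.addLevelName(TRACE, "TRACE")

parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("-q", "--quiet", action="count", default=0)
args = parser.parse_args()

# default INFO; -v -> DEBUG, -vv -> TRACE; -q -> WARNING, -qq -> CRITICAL
levels = {-2: logging.CRITICAL, -1: logging.WARNING,
          0: logging.INFO, 1: logging.DEBUG, 2: TRACE}
verbosity = min(max(args.verbose - args.quiet, -2), 2)
logging.basicConfig(level=levels[verbosity])
```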
---

I think this is an upstream bug - https://github.com/iterative/dvc/issues/7531.

What error are you getting? Are you using `setup-cml`? In which case you should also use `setup-dvc`.

Or are you using the CML Docker images?

> commit hides experiment results and I read that CML is not intended for experimentation

Sorry I don't quite follow. How do commits hide results? CML is intended for provisioning & reporting experiments.

> > commit hides experiment results and I read that CML is not intended for experimentation
>
> Sorry I don't quite follow. How do commits hide results? CML is intended for provisioning & reporting experiments.

When I say experiments I refer to using `dvc exp`. Once you commit your changes, `dvc exp show` no longer shows the experiments table.

In my specific case I wanted to try the following:

```yaml
name: experiments
on: [push]
jobs:
  train-model:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: iterative/setup-cml@v1
      - uses: actions/setup-python@v2
        with:
          python-version: '3.x'
      - name: Train model
        env:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
        run: |
          pip3 install -r requirements.txt
          #bash exp_file.sh
          dvc exp run -S train.C=0.005
          dvc exp run -S train.C=100

          echo "## Resultados del Experimento" >> report.md
          dvc exp show --only-changed --drop 'assets|src' --no-pager --md >> report.md

          echo "## Parallel Plot\n" >> report.md
          dvc exp show --only-changed --drop 'assets|src' --pcp --sort-by test_recall >> report.md

          cml send-comment report.md
```

In this particular case I wanted to report the results of 2 experiments with `dvc exp run`. I've also tried using a bash file (which you can see commented out) with the following:

```bash
dvc exp run --queue -S train.C=5
dvc exp run --queue -S train.C=30
dvc exp run --queue -S train.C=60
dvc exp run --queue -S train.C=120

dvc exp run --run-all
```

All of the experiments resulted in something like:

![image](https://user-images.githubusercontent.com/48638337/161554731-2dce5c82-c33a-46b6-a80a-8f153fc35d4e.png)

So my question would be: Is it allowed to run something like what I'm proposing? Can I run my experiments and report them in the PR?

I can't remember where I read that experimentation was meant to be done locally and only `dvc repro` run in the CI pipeline. Can you clarify that please?

Thanks in advance,

Alfonso

My brain has stopped working for the day already, but I don't think there's any reason you shouldn't be able to do this. Without having tested anything like this, I would blindly expand your checkout step to see if that fixes it:
```yml
- uses: actions/checkout@v3
  with:
    fetch-depth: 0
```
https://github.com/actions/checkout#fetch-all-history-for-all-tags-and-branches

(Transferred to DVC because it looks like a config/setup issue unrelated to CML)

@dacbd's suggestion should be correct; github's checkout action only fetches a single shallow commit by default, and this isn't enough of the Git repo for `exp run` to work properly.

@pmrowla Why does `exp run` require more than the baseline commit to be fetched?

> @pmrowla Why does `exp run` require more than the baseline commit to be fetched?

I understand you say `dvc exp run` depends on the rest of git history, probably because `dvc exp show` displays info of master metrics too. But if this is my first run of experiments, `dvc exp run` should not depend on data of a different commit to work...

Anyways, I'll be testing this small change during the day and I'll get back to you...
Thanks,

Alfonso

> @pmrowla Why does `exp run` require more than the baseline commit to be fetched?

The issue is that we use `git merge` in experiments, and doing a `merge-base` operation in git requires fetch depth > 1 (the specific required depth depends on the commits being merged).

Basically, doing the merge-base requires finding a common ancestor for the commits being merged (that is not any of the merge commits themselves).
For our purposes with DVC experiments, this means that a fetch depth of 2 will usually be sufficient (so baseline commit + 1 parent), but that won't be the case 100% of the time. So if users are trying to use `exp run` inside github actions, the general solution is to use `fetch-depth: 0` (and not use a shallow clone at all).

@dberenbaum

Thanks @pmrowla. I guess even if we tried to minimize operations where merge is needed, it will still be needed sometimes, and it's probably better to document the proposed workaround above. Opened https://github.com/iterative/dvc.org/issues/3416.

Ah right, so the CML solution would be [`cml ci --unshallow`](https://cml.dev/doc/ref/ci) (a bit better than `fetch-depth: 0`)

If we go with the suggestion in https://github.com/iterative/dvc.org/issues/3416#issuecomment-1089866190, we should keep this open to update the message in DVC to be more informative and helpful. Even if DVC can't tell whether it's in a CI context, it could at least suggest fetching missing commits and maybe link to docs that could go into more depth about specific use cases like cml.

On the UI side, we could detect the presence of the `CI`/`GITHUB_ACTIONS`/`GITLAB_CI` environment variables.

If we get the invalid commit exception we can just do the extra check to see whether or not we are in a shallow repo (you just have to check whether or not `.git/shallow` exists). This problem isn't specific to CI environments, it's just more likely to show up there since github uses shallow clones in CI by default.
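A minimal sketch of that check; git marks a shallow repository by writing a `.git/shallow` file (the function name and error text here are hypothetical):

```python
import os

def is_shallow(repo_root="."):
    """True if the repository at repo_root is a shallow clone."""
    return os.path.exists(os.path.join(repo_root, ".git", "shallow"))

# e.g. before applying the experiment's merge commit:
if is_shallow():
    raise RuntimeError(
        "cannot run experiments in a shallow clone; fetch the full history first"
    )
```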
Right, I guess the question is whether the message should be general like "don't use a shallow clone" or more CI-specific like "try fetch-depth: 0"? I think probably the former is better even if we can detect CI, since we don't want to have to cover the syntax of different git server behaviors, although we could link to docs where that level of detail could be provided.

+1 for a general message w/ link to docs for more specific details/solutions.

> Right, I guess the question is whether the message should be general like "don't use a shallow clone" or more CI-specific like "try fetch-depth: 0"? [...]

I'd prefer a general message with a CI-specific hint and a link to the docs wherever possible. I'd prefer less redirection as much as possible.

Example:
```
dvc exp run
ERROR: Cannot run in a shallow-cloned repository

If you are using `actions/checkout`, please set `fetch-depth: 0` so that the repo is not shallow-cloned.
See https://dvc.org/doc/user-guide/troubleshooting#shallow-clone.
```

Dear all,
Thanks for all of your answers. I think the spirit of good open source is having everyone really involved, and I really appreciate your help trying to solve this issue.

I implemented the steps mentioned above and it worked partially. Once I run the experiments, it turns out git is not recognized in the GitHub machine:

![image](https://user-images.githubusercontent.com/48638337/162589259-e6fb8ac9-0006-4fa0-9a13-a482c102c93d.png)

In order to solve this issue I found a really nice action: `fregante/setup-git-user@v1`. This helps the remote machine make the connection with git in order to make `dvc exp run` work.

The final workflow will be like this:

```yaml
name: experiments
on: [push]
jobs:
  train-model:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - uses: fregante/setup-git-user@v1
      - uses: iterative/setup-cml@v1
      - uses: iterative/setup-dvc@v1
      - uses: actions/setup-python@v2
        with:
          python-version: '3.x'
      - name: Experiment
        env:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
        run: |
          pip3 install -r requirements.txt
          #bash exp_file.sh
          dvc exp run -S train.C=0.005
          dvc exp run -S train.C=100

          echo "## Resultados del Experimento" >> report.md
          dvc exp show --only-changed --drop 'assets|src' --no-pager --md >> report.md

          cml send-comment report.md
```

I think this could be mentioned as a use case in the documentation because it is a very common experimentation workflow, and being able to externalize experimentation to a GitHub machine or a cloud runner would be very good.

On another note, I noticed `iterative/setup-dvc@v1` doesn't include the yaml package, which is a dependency of DVC. So you have to make sure to add it to the requirements.txt, otherwise DVC will complain. Of course, unless you think it's a good idea to add it in the action.

Thanks, and hope this helps DVC,

Alfonso

@datacubeR, thanks for the feedback; since you are already using `cml`, you can take a [look at running](https://cml.dev/doc/ref/ci) `cml ci`, which would solve the git user problem.

> and if we can externalize experimentation to a Github machine or a Cloud Runner would be very good.

This is part of what the [`cml runner`](https://cml.dev/doc/ref/runner) command can help you with: running training workflows on cloud-hosted machines like ec2. If you decide to give it a try and run into any issues or have any other feedback, be sure to let us know.

Bumping the priority of this since it is necessary for integration with cml.

> ```
> If you are using `actions/checkout`, please set `fetch-depth: 0` so that the repo is not shallow-cloned.
> ```

Is this sufficiently general? It's only applicable to GH Actions AFAIK. Each git server has its own depth settings and variables, not to mention people who manually do shallow clones.

> Ah right so the CML solution would be [`cml ci`](https://cml.dev/doc/ref/ci) (a bit better than `fetch-depth: 0`)

Also, this is probably a better suggestion, but only if cml is installed.

So maybe we can generalize the message and provide specific examples like `fetch-depth: 0` in the docs. I agree with the general point of @skshetry, but I don't see a way to specifically address every possible scenario in the error message.

If you allow me to chime in, from the user perspective I think use cases are way clearer than just random notes.

For instance, I understand these fixes are only applicable to GitHub Actions, but in my opinion there is no clear documentation on CML CI, which I think won't normally be installed, since docker containers or virtual machines are normally the most common way to deal with this part.

Should note that `cml ci` does not actually unshallow the repo (but will after https://github.com/iterative/cml/pull/957).
The correct fix for running `dvc exp run` inside github actions is to set `fetch-depth: 0` in the checkout action.

Keeping this open on the DVC side until we address clarifying the shallow-repo error in DVC UI and/or docs.

Yup, probably just adding "`did you forget to unshallow? https://error.dvc.org/unknown-object`" or similar to DVC's error message would be gewd.

`dvc exp run` succeeded in the workspace (but not in a temp dir) on a shallow clone until https://github.com/iterative/dvc/commit/fa819b0b70ba5f0f7ed7010280dbe840a813c1d3:

```
$ dvc -V
2.9.3.dev8+g37f8010b4
$ git clone --depth=1 git@github.com:iterative/example-get-started.git
Cloning into 'example-get-started'...
remote: Enumerating objects: 35, done.
remote: Counting objects: 100% (35/35), done.
remote: Compressing objects: 100% (25/25), done.
remote: Total 35 (delta 6), reused 24 (delta 4), pack-reused 0
Receiving objects: 100% (35/35), 35.62 KiB | 17.81 MiB/s, done.
Resolving deltas: 100% (6/6), done.
$ cd example-get-started
$ dvc pull
A data/prepared/
A data/data.xml
A data/features/
A model.pkl
4 files added and 6 files fetched
$ dvc exp run
'data/data.xml.dvc' didn't change, skipping
Stage 'prepare' didn't change, skipping
Stage 'featurize' didn't change, skipping
Stage 'train' didn't change, skipping
Stage 'evaluate' didn't change, skipping
$ dvc -V # Upgrade DVC version
2.9.3.dev9+gfa819b0b7
(dvc) dave@davids-air:/tmp/example-get-started 13:15:15
$ dvc exp run
ERROR: Merge failed
```

@pmrowla It makes sense as part of making the workspace and temp implementations more consistent, but is there any way to make at least workspace runs succeed in this scenario? It breaks a previously supported workflow for anyone using CML or other CI actions with `exp run`.

> The issue is that we use `git merge` in experiments and doing a `merge-base` operation in git requires fetch depth > 1 (the specific required depth depends on the commits being merged). [...]

Is a merge necessary to run an experiment? I thought the behavior for running experiments was similar to git stash apply, which works fine in this scenario.

> It makes sense as part of making the workspace and temp implementations more consistent, but is there any way to make at least workspace runs succeed in this scenario?

Prior to that change, workspace and temp implementations were essentially two completely different things, which made testing and maintaining both difficult, and led to them having different behavior in a lot of scenarios. I don't think it is worth the development effort required to maintain two separate `exp run` implementations vs requiring that users unshallow repos in CI for both the workspace and tempdir cases.

> Is a merge necessary to run an experiment? I thought behavior for running experiments was similar to git stash apply, which works fine in this scenario.

It works "similar to" stash apply, not exactly like `git stash apply`.
We have to do a merge because libgit2/pygit2 and dulwich do not have implementations for `stash apply`-ing arbitrary merge commits. In order to get similar behavior, we have to just use a regular `merge` to apply the merge commit.

---

@jorgeorpinel So having `data`, right? Git does the same thing, hence why we use `/data` in `.gitignore` when we `dvc add data`.

Maybe it behaves differently in distinct OSs? I'm on Windows (Cmder / Git Bash):

```
λ git init
λ touch data data2 data3 data4
λ echo data > .gitignore
λ ls
data data2 data3 data4
λ git status
...
Untracked files:
  (use "git add ..." to include in what will be committed)
  .gitignore
  data2
  data3
  data4
λ git add data
The following paths are ignored by one of your .gitignore files:
data
Use -f if you really want to add them.
λ git add .
λ git status
...
  new file: .gitignore
  new file: data2
  new file: data3
  new file: data4
```

@jorgeorpinel Oh, I misunderstood your report. Need to check that, indeed. @pared Maybe you have any ideas?

Will check this, sorry for the late response

@jorgeorpinel that was happening on Windows, right?
The following test passes on my Linux machine for `0.90.2`:
```python
def test_ignore_data(tmp_dir, dvc):
    tmp_dir.gen({"data": "data", "data1": "data1", "data2": "data2",
                 "data3": "data3", ".dvcignore": "data"})

    files = []
    for r, d, fs in dvc.tree.walk("."):
        files.extend([os.path.join(r, f) for f in fs])

    assert set(files) == set(["./data1", "./data2", "./data3", "./.dvcignore"])
```

Sorry guys. I don't remember which DVC commands I noticed originally that don't respect files in .dvcignore. It wasn't a great report... But I'm QAing this and I think there's definitely some funny stuff happening:

```console
$ echo data > data
$ echo data > .dvcignore
$ dvc add data
100% Add|██... # Works and puts it in .gitignore. Should ignore? E.g.:

$ git add data
The following paths are ignored by one of your .gitignore files:
data
Use -f if you really want to add them.
```

I'm on 0.93.0 from the exe installer now. Yes, Windows (Git Bash on Cmder).

Also, the first example in https://dvc.org/doc/user-guide/dvcignore#examples doesn't work for me (all files inside the data/ dir are added and cached). I didn't check the others.

@jorgeorpinel hmm, that does not seem right.
I think we should behave in a similar way as git does, which is informing the user that their output collides with an entry of some `.dvcignore` file.
This functionality can probably be implemented after we introduce something like `git check-ignore` for `.dvcignore`. I think there was an issue for that already, but I am unable to find it.

Hello! Update here, I'm on DVC 1.3 and this seems solved, although the message I get,

```console
$ dvc add data
Adding...
ERROR: output 'data' does not exist
```

could be improved, similar to what `git add` uses for files matching .gitignore patterns:

```
The following paths are ignored by one of your .gitignore files:
data
Use -f if you really want to add them.
```

And, should we have a `-f`/`--force` option for add as well?

Thanks

@jorgeorpinel Thanks for the update! Indeed, that message could be improved.

> And, should we have a -f/--force option for add as well?

I would rather not do that.
We've seen it causing very odd bugs in git, and we wouldn't want that for dvc unless there are some very good scenarios. Would wait for someone to ask for it.

It seems the conflation of the concepts "exists" and "ignored" leads to confusing error messages. In my case one of the "run" outputs was by mistake ignored in .dvcignore, but the respective message confusingly said the file "does not exist". Possibly related source: [dvc/tree/local.py#L79](https://github.com/iterative/dvc/blob/12c7013062c89a4f9f75d7459a7ed654f3ac174f/dvc/tree/local.py#L79).

@autayeu hi!
Could you share some info on your setup?
`.dvcignore` contained your `run` output before you actually `dvc run` it, right?

Sure. Attached [dvc.message.repro.zip](https://github.com/iterative/dvc/files/5063241/dvc.message.repro.zip) is a minimum setup to reproduce the issue. To reproduce:

Unpack, change into the dvc.message.repro directory and run:
```bash
dvc run -n preprocess -d preprocess.py -o data.txt -o data.log "python -u preprocess.py | tee -i data.log"
```
You'll see:
```text
Running stage 'preprocess' with command:
	python -u preprocess.py | tee -i data.log
screen content
ERROR: output 'data.log' does not exist
```
However:
```bash
(dvc) zzz:dvc.message.repro zzz$ ls -l
total 24
-rw-r--r-- 1 zzz zzz 15 Aug 12 15:45 data.log
-rw-r--r-- 1 zzz zzz 12 Aug 12 15:45 data.txt
-rw-r--r-- 1 zzz zzz 84 Aug 12 15:39 preprocess.py
```
So data.log does exist. A bit unexpectedly, dvc also created an empty .gitignore file:
```bash
(dvc) zzz:dvc.message.repro zzz$ ls -l .gitignore
-rw-r--r-- 1 zzz zzz 0 Aug 12 16:05 .gitignore
```

Other relevant outputs:
```bash
(dvc) zzz:dvc.message.repro zzz$ cat .dvcignore
*.log
```
```bash
(qac) zzz:dvc.message.repro zzz$ dvc version
DVC version: 1.4.0+12c701

---------------------------------
Platform: Python 3.8.5 on macOS-10.15.6-x86_64-i386-64bit
Supports: azure, gdrive, gs, hdfs, http, https, s3, ssh, oss
Cache types: reflink, hardlink, symlink
Repo: dvc, git
```

@autayeu Thank you! It seems to me that we should rethink the message.
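As a rough illustration of the `git check-ignore`-style helper mentioned above, here is a sketch using the third-party `pathspec` package and its gitwildmatch patterns (this is not DVC's actual implementation):

```python
from pathspec import PathSpec

def check_ignore(dvcignore_path, target):
    """Return True if target matches a pattern in the given .dvcignore."""
    with open(dvcignore_path, encoding="utf-8") as f:
        spec = PathSpec.from_lines("gitwildmatch", f)
    return spec.match_file(target)

# With the .dvcignore from the report above containing "*.log":
# check_ignore(".dvcignore", "data.log") -> True, so `dvc run` could warn
# that the declared output is ignored instead of saying it "does not exist".
```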
---

Yep, it's a bug. Since for Python `Union[int, float] == Union[float, int]`, the key in the cache is the same.
I guess we could use `(cls, params, get_args(params))` as the key of `_generic_types_cache` instead of `(cls, params)`.

> Yep it's a bug. Since for python `Union[int, float] == Union[float, int]`, the key in the cache is the same.

Good to hear!

> I guess we could use `(cls, params, get_args(params))` as key of `_generic_types_cache` instead of `(cls, params)`

What about nested models, say `List[Union[float, int]]` and `List[Union[int, float]]`? Wouldn't these still be considered equal?

Is this new in v1.10?

> Is this new in v1.10?

No, I saw it first in 1.9.0 and then updated to check if it was still there in the newest version. It is probably as old as the current specification of the key in `_generic_types_cache`.

Humm, can't remember if that was new in 1.9 or 1.8.

The question is whether we should fix this in a patch release of 1.10 or wait for V2?

> The question is whether we should fix this in a patch release of 1.10 or wait for V2?

That's obviously not for me to say, but personally I think it would be nice to have a patch for at least the basic (non-nested) case, as the bug is breaking several of my tests.

This gets more complicated, as the more-or-less exact same issue is present in the `typing` library itself, e.g.

```python
from typing import get_args, List, Union

print(get_args(Union[int, float]))
print(get_args(Union[float, int]))
print(get_args(List[Union[float, int]]))
print(get_args(List[Union[int, float]]))
```

Prints:

```
(<class 'int'>, <class 'float'>)
(<class 'float'>, <class 'int'>)
(typing.Union[float, int],)
(typing.Union[float, int],)
```

This is discussed in [this CPython issue](https://github.com/python/cpython/issues/86483), which includes comments by Guido, and which resulted in the following documentation change:

> If X is a union or [Literal](https://docs.python.org/3/library/typing.html#typing.Literal) contained in another generic type, the order of (Y, Z, ...) may be different from the order of the original arguments [Y, Z, ...] due to type caching.

(from the [current Python docs](https://docs.python.org/3/library/typing.html#typing.get_args))

This at least rules out a recursive solution using `get_args` for nested models (which I was toying around with). If I understand this correctly, I believe it also means that the difference between e.g. `List[Union[float, int]]` and `List[Union[int, float]]` would be inaccessible in Python itself, which is a problem.

Thinking from the perspective of a downstream library depending on `pydantic`, I suppose one could implement a workaround for the nested problem (i.e. `List[Union[int, float]] == List[Union[float, int]]`) by replacing `Union` with a custom model, say `OrderedUnion`, which overrides `__eq__`. Such a model could even be included in `pydantic` if there is great enough need for this.

However, all of this does not disqualify the simple `get_args` fix suggested by @PrettyWood. It would still work for the top-level `Union` case, as in my example code, which is really the only thing I need for my code anyway. Also, I suppose the fact that a solution to the nested problem would be dependent on the above-mentioned "feature" in Python is really an argument for only solving the simple non-nested issue now.
So, in a sense, I would argue that this complication might actually make this issue simpler to manage, at least for now.

Confirmed, I think we should fix this in V1.10.

PR welcome, it'll need to be pretty quick to make it into the next patch release #4472, but otherwise it can be included in the (inevitable) next patch release.

I can give it a try.

great, thanks.

I'm getting the following mypy errors:
```
pydantic/generics.py:65: error: Argument 1 to "get_args" has incompatible type "Union[Type[Any], Tuple[Type[Any], ...]]"; expected "Type[Any]" [arg-type]
pydantic/generics.py:131: error: Argument 1 to "get_args" has incompatible type "Tuple[Type[Any], ...]"; expected "Type[Any]" [arg-type]
```

Which seems to be a bug in mypy: https://github.com/python/mypy/issues/4625

Any thoughts on how to handle this?

hard without seeing the change, create the PR so I can see it.

Basically, the type of the `params` parameter specified in `GenericModel.__class_getitem__` is incompatible with the `Type[Any]` in the `get_args` method, according to `mypy`. As far as I can understand, `Type[Any]` is supposed to allow any type, including `Union` and `Tuple`. It makes sense to me that this is a mypy bug, but I might have misread the mypy issue, as the issue is not exactly the same.

Anyway, I'll add a test and submit the PR for you to see.

I'd love to include this fix in v1.10.2, any chance you can submit the PR asap so I can review and merge it?
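To make the cache collision concrete, a small self-contained demonstration of why `(cls, params)` keys collide and how adding `get_args(params)` (the fix discussed above) disambiguates the top-level case:

```python
from typing import Union, get_args

cache = {}
# The two Unions compare (and hash) equal, so they are the same dict key:
cache[("Model", Union[int, float])] = "Model[int, float]"
print(cache[("Model", Union[float, int])])  # wrong hit: "Model[int, float]"

# get_args preserves declaration order at the top level, so extending the
# key with it keeps the two parametrizations apart:
print(get_args(Union[int, float]))  # (<class 'int'>, <class 'float'>)
print(get_args(Union[float, int]))  # (<class 'float'>, <class 'int'>)
key_a = ("Model", Union[int, float], get_args(Union[int, float]))
key_b = ("Model", Union[float, int], get_args(Union[float, int]))
print(key_a == key_b)  # False -> distinct cache entries
```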
---

@RomanVeretenov Does it happen after a specific dvc command?

@RomanVeretenov Could you show us the `git check-ignore $(pwd)/.dvc/tmp` output, please? Is there anything special about your repo location?

@efiop it happens on older repos. Try to clone our Get Started and run a few commands (pull, checkout, etc).

> @RomanVeretenov Could you show us `git check-ignore $(pwd)/.dvc/tmp` output, please? Is there anything special about your repo location?

output is empty

@shcheklein It does, but it only adds the missing `/tmp` (as it is supposed to), not adding it multiple times as in this issue. I am not able to reproduce the issue.

@RomanVeretenov Ok, that is bad; it means that git is not seeing that `/tmp` in `.gitignore` or not understanding it right. Is there anything special about the location of your repo?

Could you show:
```
$ python -c 'import os; print(os.getcwd())'
$ python -c 'import os; print(os.path.realpath(os.getcwd()))'
```
please?

So far this looks like there is something wrong with your environment or git specifically.

```
$ python -c 'import os; print(os.getcwd())'
/home/ds
$ python -c 'import os; print(os.path.realpath(os.getcwd()))'
/home/ds
```

yep, the repo root is '/home/ds' =)

@RomanVeretenov Using `/home/ds` is questionable, but nothing functionally wrong about it :slightly_smiling_face: Looks alright. Maybe you have `.dvc/` gitignored somewhere or something?

Need to check your `.gitignore`s in `/home` and `/home/ds` to see if they are sane (maybe they have something like `!!.dvc/tmp` or `!!tmp` that forces git to not ignore `.dvc/tmp`). Would also check global gitignore rules, if you have those.

There is no /home/.gitignore.
Also I have no global gitignore.
Also .git/info/exclude is empty.

```
/home/ds % cat .gitignore

*.jpg
*.JPG
*.png
*.PNG
extracted.csv
```

Also, I have recursively listed all .gitignore's. None of them but .dvc/.gitignore contains tmp:

```
find . -name '.gitignore' -exec echo {} \; -exec grep tmp {} \;
... here goes gitignores from all nested folders
./.dvc/.gitignore
/tmp
/tmp
./.gitignore
...
```

Each `dvc status` call adds a /tmp to the end of .dvc/.gitignore.

@RomanVeretenov Could you try to reproduce it with a newly created dvc project? E.g.
```
mkdir myrepo
cd myrepo
git init
dvc init
git add .
git commit -m "init"
dvc status
git status
```

I'm still not able to reproduce your issue; most likely there is something off with your environment, can't put my finger on anything yet.

> Could you try to reproduce it with a newly created dvc project? [...]

It works ok on a clean repo:

```
 ~/code/myrepo/.dvc
 % cat .gitignore
/config.local
/updater
/lock
/updater.lock
/tmp
/state-journal
/state-wal
/state
/cache
```

after executing all the given commands.

> It works ok on a clean repo

@RomanVeretenov So there is something with your .gitignores in the rest of the project. You'll have to take a look and see what is different between it and a clean one. It might be .gitignores somewhere; it might be the fact that you are in `/home/ds` (esp. if you are working as the `ds` user); it might be that you don't have permissions to access some files (again, if your git project is not owned by you and you don't have shared mode enabled in git config). But it is clear that this is not a dvc issue, it is your git repo and environment that are not quite right. Sorry, but you are pretty much on your own right now, as I can't put my finger on anything specific :slightly_frowning_face:

Please let us know how it goes. I'll close this issue for now.

@RomanVeretenov

When `/tmp` is already part of `.dvc/.gitignore`:

what does `git check-ignore ".dvc/tmp"` return?

also, what will a script like this return in your case:

```
from dvc.scm.git import Git
git = Git()
git._ignored(".dvc/tmp")
```

I wonder if it's some yet-another-gitpython bug.

@shcheklein Already checked: https://github.com/iterative/dvc/issues/3561#issuecomment-606737795, it is not gitpython.

@RomanVeretenov @efiop

> @shcheklein Already checked: #3561 (comment), it is not gitpython.

sorry, missed that ... this is really weird!

@RomanVeretenov can you just do it all manually: save the old `.dvc/.gitignore` somewhere, create a new one with a single entry `/tmp`, and run from the project's root `git check-ignore .dvc/tmp`?

Do you use submodules or any other advanced Git stuff?

Will re-check everything next week

I was having this issue and discussed it with Ivan on Discord.
Here is part of our conversation.

> For some reason on my repo, running
> echo "test 1" >> models/README.md
> dvc add models
> echo "test 2" >> models/README.md
> dvc add models
>
> is appending models to .gitignore twice. I tested it on a new repo and you're right, it doesn't append it more than once. I'll try to reset dvc and see if that works

I tried `dvc destroy` and started again. Didn't help. The only thing that worked is that I started with a clean models folder, added it to dvc/git/.gitignore, and then added the data to it.

From https://discordapp.com/channels/485586884165107732/563406153334128681/703247236368302091

```
git init
mkdir models
echo "test 1" >> models/README.md
git add models/README.md
git commit -m "Add requirements file"
git checkout -b b1
dvc init
dvc add models
git add models.dvc .gitignore
cat .gitignore
echo "test 2" >> models/README.md
dvc add models
cat .gitignore
```

So it looks like we should handle it better on the DVC side. I suppose @RomanVeretenov had something similar. Reopening.

Ok, looks like we broke `is_tracked` during one of the rounds of optimizing dvc for a use case with many thousands of dvc-files:
```
Python 3.7.0 (default, Dec 26 2018, 22:48:20)
[GCC 7.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information
>>> from dvc.scm.git import Git
>>> g = Git(".")
>>> g.is_tracked("tests/__init__.py")
True
>>> g.is_tracked("tests")
False
>>> g.is_tracked("tests/")
False
>>> g.is_tracked("tests/func")
False
>>> def func(path):
...     return bool(g.repo.git.ls_files(path))
...
>>> func("tests")
True
>>> func("tests/")
True
>>> func("tests/unit")
True
```
`func` represents an old implementation and works like a charm. Need to look into it. And, more importantly, add better tests for it.

EDIT: it was an older bug after all.

Big thanks to @ammarasmro for investigating! :pray:

Glad I could help :)

---

Hi @dchichkov!

Great request! We actually have one `-C` in `dvc repro`. It has been there for a long time, but we haven't heard any requests about other commands (nor about that one, really) ever, so it is interesting to get this feature request :slightly_smiling_face: The implementation should be pretty simple: we'd need to add `-C` to the parent parser https://github.com/iterative/dvc/blob/1.0.0a11/dvc/cli.py#L123 and chdir in `dvc/command/base.py:CmdBase` accordingly.

Maybe you would like to give a shot at implementing it? :slightly_smiling_face: We'll try to help with everything we can.

If @dchichkov doesn't want to implement it, I'd like the chance.

@JosephTLucas Sounds great! Please feel free! :pray:
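A minimal sketch of the suggested approach, i.e. a global `-C` handled once before dispatching to subcommands, similar to `git -C <path>` (argument names here are illustrative):

```python
import argparse
import os

parser = argparse.ArgumentParser(prog="dvc")
parser.add_argument("-C", dest="cd", metavar="<path>", default=os.curdir,
                    help="run as if dvc was started in <path>")
# ... subcommand parsers (add, pull, repro, ...) would be registered here ...
args, rest = parser.parse_known_args()
os.chdir(args.cd)  # every command then operates relative to the new cwd
```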
---

Happy to accept a PR to fix this.

I guess we should:
* raise the correct error, resulting in a validation error, if the datetime is out of bounds
* accept negative timestamps if they result in valid datetimes; there's nothing implicitly wrong with the unix timestamp of `-1`. Stuff happened before 1970, apparently.
* comparing with `MS_WATERSHED` as an absolute number makes sense I suppose; we would just need to update the docs to be explicit.

I don't get this:

> * raise the correct error resulting in a validation error if the datetime is out of bounds

For the rest, everything is running fine and ready for a PR.

I think it might be fine, I just meant:

make sure passing timestamps like `1_000_000_000_000_000` or `-100_000_000_000_000` cause sensible validation errors, not other exceptions.

Yes, it runs fine with these timestamps. The generated `datetime` does not match the real date, as these timestamps are greater than `MS_WATERSHED`, but they are parsed without any exception.
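For reference, a sketch of the watershed logic under discussion; the constant mirrors pydantic v1's `MS_WATERSHED`, and the `abs()` comparison is what makes negative (pre-1970) timestamps behave symmetrically (a simplified sketch, not pydantic's exact code):

```python
from datetime import datetime, timezone

MS_WATERSHED = int(2e10)  # ~year 2603 in seconds, ~Aug 1970 in milliseconds

def from_unix(value: float) -> datetime:
    # Magnitudes above the watershed are assumed to be milliseconds;
    # abs() keeps the rule symmetric for negative timestamps.
    while abs(value) > MS_WATERSHED:
        value /= 1000
    return datetime.fromtimestamp(value, tz=timezone.utc)

print(from_unix(-1))                 # 1969-12-31 23:59:59+00:00
print(from_unix(1_000_000_000_000))  # interpreted as milliseconds
```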
---

Seems like it will involve creating a new template.

See https://vega.github.io/vega-lite/examples/parallel_coordinate.html for an example parallel coordinates plot schema.

This will also require adding support for parameters in DVC plots; my understanding is that we cannot currently plot values from `params.yaml` since it is not a DVC out. @pared can you confirm this?

@pmrowla That is correct, currently we are using `find_outs_by_path` to gather plot data. We probably should generalize it to accept any viable file.
Related: https://github.com/iterative/dvc/issues/4446

---

More context: https://discuss.dvc.org/t/merging-of-files-in-dvc-tracked-directories/599/5

---

What should the behavior be for dags that are too wide to display in the user's terminal? Wrapping lines (or truncating them) eliminates the usefulness of the output entirely here; it's not the same as a table, where we can merge/hide columns until the table is small enough to fit.

Should we do `--no-pager` as a default in both of the cases? I use `--no-pager` more often than the default one.

> What should the behavior be for dags that are too wide

Good Q. Maybe just truncate it, like I think you are saying we do for tables?

> Should we do --no-pager as a default in both of the cases?

I also use `--no-pager` all the time haha. At least give it a short flag?

BTW `git` has `--no-pager` in the top command, e.g. `git --no-pager diff`. And it has a setting to configure the default paging behavior/command, I think. More options to consider.

> More options to consider.

Detect if you have enough space or not, and apply the pager only if not? That's what git does on my machine, but as we discussed with Ruslan it's not clear how/why it is happening.

For most of my scenarios, there is never enough space for the whole table. But I still prefer `--no-pager`.

BTW I renamed this issue and rewrote the description to reflect what we're actually talking about. To summarize, I propose:

Step 1. Make it the default for `exp show`
Step 2. Put it directly in `dvc --no-pager` and enable it for `dag` too (but not as default).

Qs:
- How to handle long lines? I say truncate them for now.
- Any other commands that need it? E.g. `list`

> detect if you have enough space or not and apply pager if not? that's what git does on my machine, but as we discussed with Ruslan it's not clear how/why it is happening.

Seems like Git is using `less -FRX` by default to do this: https://git-scm.com/docs/git-config#Documentation/git-config.txt-corepager. It would be great to do the same in dvc and make this the default.

> * How to handle long lines? I say truncate them for now.

👍

> * Any other commands that need it? E.g. `list`

Ideally, we can abstract and set all of this at the top level as part of the UI overhaul that's ongoing. Thoughts, @skshetry?

> > detect if you have enough space or not and apply pager if not? that's what git does on my machine, but as we discussed with Ruslan it's not clear how/why it is happening.
>
> Seems like Git is using `less -FRX` by default to do this: https://git-scm.com/docs/git-config#Documentation/git-config.txt-corepager. It would be great to do the same in dvc and make this the default.

good to finally know the cause 😄, anyway it adds support to my proposal https://github.com/iterative/dvc/issues/4060#issuecomment-769846201 ;-)

I'm working on keeping the behavior the same as `git`, as proposed in https://github.com/iterative/dvc/issues/4060#issuecomment-769846201. But very few people will be able to see this, as most users will have `PAGER` or `LESS` set.

> I'm working on keeping the behavior same as `git` as proposed on [#4060 (comment)](https://github.com/iterative/dvc/issues/4060#issuecomment-769846201).

It would be great to have behavior similar to Git. However, it is not limited to the --no-pager/less mode. In Git, when I quit the less mode (like `git log`) I still see the output. I would not need the `--no-pager` option of `exp run` if this were the case. But the current default mode clears out the output - that's the problem.

@skshetry could you please clarify what users will see after quitting the command in less mode?

@dmpetrov, well I have two ideas:

1. ~~Print the table in a pager mode and in a non-pager mode in stderr after the pager exits. This way, you will see a non-collapsed form in the table and a collapsed form after it exits.~~ Looks like this messes up `redirections`.
2. `DVC_PAGER="less -FRSX~" dvc exp show` -> This way, if the content can fit within the screen (vertically and horizontally), no pager will be used. Otherwise, the pager will be shown where the user's prompt was (without clearing the screen to the top). After that, the content will still be on the user's screen.

The second one has the disadvantage that when the user scrolls, it will leave an empty patch in the user's terminal.

Thoughts @pmrowla?

> It would be great to have similar to Git behavior. However, it is not limiting by --no-pager/less mode. In Git when I quite from the less mode (like `git log`) I still see the output.

@dmpetrov

This is not actually default Git behavior (when I quit `git log` inside a pager I do not see the output). It sounds to me like something in your machine/environment is configuring the system pager to be `less -X`.

Can you run

```
echo $PAGER
```

in a terminal and see what it outputs?

IMO we should leave the existing defaults, and if the user wants the `-X` behavior, they can configure `DVC_PAGER` as needed, the same way it works with configuring `PAGER` for git.

If anything, we could maybe consider just respecting the system `PAGER` instead of using our own env var (so that we would behave consistently with git within a given user's environment).

You can disregard my last comment, I see where Dave linked the git docs with the info about their defaults now.

Using the same `less -FRSX` defaults makes sense to me, but we should probably also consider checking `$PAGER` and `$LESS` in the same way that git does.

@skshetry

@pmrowla, `dvc exp show` needs `-R` to work properly. `PAGER` is usually set to `less`, and `LESS` is already set up on my machine with `-R` (by ohmyzsh?), but we cannot say the same for everyone. So it feels like we cannot just honour `$LESS`.

I'd propose we detect if the pager is `less` (checking `DVC_PAGER`, `PAGER` and `DEFAULT_PAGER` in that order), and apply `-R` even if `$LESS` is specified (this should be applied without us doing anything and deviates from the behavior of `git`). And, if there's no `$LESS` set, we could use `less -FRSX`.

Otherwise we would need to disable the styles.
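A sketch of the lookup order proposed above. The env var names come from the discussion; the helper itself is illustrative, not DVC's actual `dvc/utils/pager.py`:

```python
import os

def choose_pager() -> str:
    # Resolution order from the proposal: DVC_PAGER, then the system
    # PAGER, then fall back to plain `less`.
    pager = os.getenv("DVC_PAGER") or os.getenv("PAGER") or "less"
    if os.path.basename(pager.split()[0]) == "less":
        if "LESS" in os.environ:
            pager += " -R"      # styled tables need -R even with a user LESS
        else:
            pager += " -FRSX"   # git-like defaults when LESS is unset
    return pager
```

The `-FRSX` combination is what makes `less` quit immediately on one-screen output, pass ANSI colors through, chop long lines, and leave the output on screen after quitting.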
@shcheklein On my machines (linux and mac) git does use the pager even if it can fit log/diff on the screen. I suppose that you have some pager configured that does that. Mind sharing `$ echo $PAGER` and `$ git config core.pager`, please? Also, if the pager is `less`, could you share `$ alias less`?

Also, IIRC you are using pycharm - are you running git log from it? If so, could you try doing that from a regular terminal too?

Sure, @efiop:

```
√ Projects/dvc.org % alias less
?1 Projects/dvc.org % echo $PAGER

√ Projects/dvc.org % git config core.pager
?1 Projects/dvc.org %
```

For the record: GIT_PAGER is also empty.

For me that results in git always outputting straight to the terminal. Not sure what is going on.

@efiop, @shcheklein, I suspect `less` is told not to paginate if the output is less than a page.

Try:
```sh
export LESS="-F -X $LESS"
git branch
```

But, by default, git/less always paginates.

@skshetry That makes sense, but I don't quite understand why `dvc dag` doesn't do that as well. Might be missing some logic in `dvc/utils/pager.py`, I suppose.

@skshetry not sure how to check this

```
√ build/example-get-started % echo $LESS

```

it is also empty for me. I didn't change any setting myself.

I checked the default Mac OS terminal - same behavior. Even if there are only two stages in the DAG, it paginates.

Should we repurpose this issue to consider this the expected behaviour (by adding `-FX` below)?
https://github.com/iterative/dvc/blob/fcdb503b4eff1297e5c1c1ed18f5606fe780e481/dvc/utils/pager.py#L15-L17

@skshetry Sounds good!

---

We used to clone the repo if `rev` was provided before; nowadays we don't do that (and hope `scm` copes with it), but we don't properly initialize them. The easy fix might be to roll back to that behaviour, or to fix this initialization process.

Another user running into this: https://discordapp.com/channels/485586884165107732/485596304961962003/831501540484972564

If we're counting the number of users running into this: +1 :stuck_out_tongue:
---

@andronovhopf Indeed, an md table is not complete without the trailing newline, which is a bit non-intuitive when compared to the regular cli workflow (at least for me), but it totally makes sense to make this right. Looks like right now only cml is using this feature, so I'm not too worried about breaking something for someone else. Will send a fix ASAP.

Hm, though it would still feel a bit weird when the table is the last thing in the file and you have a trailing newline for no reason. :thinking:

Also, not using a trailing newline means that you could expand the table on-the-fly. But that's a feature no one has asked for :smile:

hahaha yes! I think the "extra newline if the table is the last thing in your report" is probably only a minor weirdness. I imagine (although can't be positive yet) that more people will have to debug "why is everything getting sucked into my table?" than "why is there an extra line after my table?"
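For illustration, a minimal, hypothetical table writer showing the trailing-newline fix being discussed (nothing here is cml's or DVC's actual code):

```python
def to_markdown(rows, header):
    """Render rows as a markdown table, terminated by a newline."""
    lines = [
        "| " + " | ".join(header) + " |",
        "|" + "|".join("----" for _ in header) + "|",
    ]
    lines += ["| " + " | ".join(map(str, row)) + " |" for row in rows]
    # The trailing newline terminates the table, so any report text
    # appended after it is not absorbed into the last row.
    return "\n".join(lines) + "\n"

print(to_markdown([("train", 0.92), ("test", 0.89)], ["stage", "acc"]))
```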
---

It's a matter of taste, I prefer running tests first.

It doesn't make much difference since all parts are pretty fast for pydantic.

Happy to change if others also want a change.

> Question
>
> Currently calling make in the project will first run all tests and only after that will it run lint checks. Is there any reason for that? It doesn't make sense to me, as code analysis completes much faster. Now I have to either run code analysis separately before running make, or wait while all tests pass.
>
> So can we change this:
>
> .PHONY: all
> all: testcov lint mypy
>
> to this:
>
> .PHONY: all
> all: lint mypy testcov
>
> ?

I'm fine either way. I'd say I have a weak preference for the order @MrMrRobat has proposed though.

I frequently find myself hitting a stupid linting issue after running all the tests, then get annoyed because I feel like I should probably make sure everything works after fixing the linting issue by running the tests again. But it's a pretty minor thing either way.

Okay, let's change. PR welcome.

---

This one is a known issue since the config refactor. The workaround is to simply first do:

```
dvc remote add storage s3://storage --local
```

just to let you know, this breaks all the docs/examples

---

Looks like we should also introduce our `-n` flag to `dvc gc` and `dvc exp gc`.

@karajan1001 How much time would it take to implement this?

> @karajan1001 How much time would it take to implement this?

As we have already refactored the branches, I think it'll cost about 1-2 days.

---

afaik `TypedDict` is not supported by pydantic, see #760.

Anything that does work will be flakey, will not perform proper validation and could break at any time.

I think it's best to explicitly raise an error whenever a `TypedDict` is used, saying

> `TypedDict` is not yet supported, see #760

I think `TypedDict` fields were usable with pydantic==1.4, but, as far as I can tell, the above `TypeError` does occur in pydantic>=1.5.

I think you're right, though, and it probably is best to raise an explicit error when `TypedDict` is used as a model field until `TypedDict` is fully supported.

I confirm `TypedDict` were usable with pydantic 1.4.

There's no logic for validating `TypedDict`, so while you might have been able to use them, I very much doubt it was a good idea.

We should either support them fully or raise a sensible exception explaining that they don't work.

This is an issue about raising a better error; #760 is about supporting them fully.

I'm aware that no validation logic is currently performed by _pydantic_ and it is fine with me.

> We should either support them fully or raise a sensible exception explaining that they don't work.

There is also the option of treating them as `Dict[Any, Any]` for validation (they are dicts at run-time after all). That way, one could still get proper _mypy_ warnings during development and pydantic validation would just reduce to `isinstance(variable, dict)`.

v1.4 behaved like this:

```
In []: from typing import TypedDict
In []: from pydantic import BaseModel

In []: class MyDict(TypedDict):
  ...:     a: int

In []: class A(BaseModel):
  ...:     d: MyDict

In []: A(d={})
Out[]: A(d={})

In []: A(d=12)
---------------------------------------------------------------------------
ValidationError: 1 validation error for A
d
  value is not a valid dict (type=type_error.dict)
```

> There's no logic for validating TypedDict

Actually, a TypedDict does contain validation instructions:

```
>>> from typing import TypedDict

>>> class Person(TypedDict):
...     name: str
...     age: int

>>> Person.__annotations__
{'name': <class 'str'>, 'age': <class 'int'>}
```

So technically Pydantic should be able to validate dictionaries that are type hinted with `TypedDict`s.

Yes, it's possible, now someone just needs to go and implement the feature. 😄

I currently have a working solution for this which does validation based on the TypedDict annotations as suggested. I'm currently writing test cases and will post an update when I've got full coverage.

My solution is here, but there are parts of it that I think could probably be done better: https://github.com/kpberry/pydantic/commits/typed-dict-support

1. `TypedDict` does not support `issubclass` checks (which was the initial issue in this thread), so in order to check if a type is a subtype of `TypedDict` in `ModelField._type_analysis`, it seems like we need to check if it is a subclass of `TypedDict`'s metaclass. Unfortunately, `_TypedDictMeta` is protected in both the typing and typing_extensions modules, so the mypy check won't allow it to be imported (probably for good reason). For now, I check `self.type_.__class__.__name__ == '_TypedDictMeta'`, but I think there must be a better way to do this.
2. I'm not sure what the best spot is in `ModelField._type_analysis` to do the `TypedDict` subclass check. I put it before the origin checks to avoid the issue at the top of this thread, but there might be a better spot for it.
3. I added each `TypedDict` value type to `self.sub_fields` in `ModelField._type_analysis`, since it fit the pattern for validating `Tuple`s, etc. However, since each value corresponds to a specific named key, I had to add a `key_name` attribute to `ModelField` with the key name in order to implement `ModelField._validate_typeddict`. I'm not sure if it's a good idea to add new attributes to `ModelField` for this, especially considering that it's a relatively niche feature.

Would appreciate feedback/suggestions before trying to make a pull request.

Edit: Forgot to mention, all tests are passing and all lines that I added should be covered.
Edit 2: I noticed that there's an optional `total` parameter to `TypedDict`, so I added support and tests for that.
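To make the annotation-based idea concrete, here is a minimal standalone sketch (not pydantic's implementation) that validates a dict against a TypedDict's hints. It only handles plain runtime-checkable value types, and assumes a class-level `__total__` flag, which TypedDict classes expose:

```python
from typing import get_type_hints

def validate_typed_dict(value, td_cls):
    """Check `value` against the annotations of TypedDict `td_cls`."""
    if not isinstance(value, dict):
        raise TypeError(f"{value!r} is not a valid dict")
    for key, typ in get_type_hints(td_cls).items():
        if key not in value:
            if td_cls.__total__:  # total=True: every key is required
                raise ValueError(f"missing required key: {key}")
            continue
        # works only for simple types like str/int, not nested generics
        if not isinstance(value[key], typ):
            raise TypeError(f"{key} must be {typ.__name__}")
    return value
```

A real implementation would recurse into nested fields instead of calling `isinstance` directly, which is roughly what the `sub_fields` approach described above does.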
---

@danfischetti Could you please post a cProfile log from your specific use case so we can see more clearly what is slowing you down? :slightly_smiling_face:

@danfischetti Also, did the change from yesterday make any difference for you?

I just updated to 0.65.0 and I don't see any difference.
```
In [7]: cProfile.run("repo.collect_stages()")
^C       329107531 function calls (317924667 primitive calls) in 113.090 seconds

   Ordered by: standard name (selected rows)

   ncalls         tottime  percall  cumtime  percall filename:lineno(function)
   1                0.042    0.042  113.095  113.095 __init__.py:382(collect_stages)
   3444             3.412    0.001   49.902    0.014 __init__.py:373(filter_dirs)
   5764             0.147    0.000   62.820    0.011 stage.py:598(load)
   5764             0.043    0.000   36.737    0.006 main.py:316(load)
   149985/5764      0.584    0.000   33.287    0.006 composer.py:109(compose_node)
   1295888          1.388    0.000   19.170    0.000 scanner.py:1756(check_token)
   9997354/5764     7.516    0.000   17.306    0.003 copy.py:132(deepcopy)
   567067/5764      1.264    0.000    4.393    0.001 schema.py:245(validate)
   9191693         29.449    0.000   46.949    0.000 posixpath.py:329(normpath)
   109256682        6.549    0.000    6.549    0.000 {method 'append' of 'list' objects}
   9244935          4.773    0.000    4.773    0.000 {method 'split' of 'str' objects}
   18501289         2.396    0.000    2.396    0.000 {method 'startswith' of 'str' objects}
   16204105         2.353    0.000    2.454    0.000 {built-in method builtins.isinstance}
   ... (several hundred further rows trimmed; the bulk of the time is split
   between YAML parsing (composer/parser/scanner), schema validation,
   deepcopies, and path normalization) ...
```

I interrupted the call after about 2 minutes.

@danfischetti Got it, so looks like I've misunderstood you yesterday :slightly_frowning_face: (we've found a major bug thanks to that, so yay :tada: :slightly_smiling_face:).

Please correct me if I'm wrong, but AFAIK you are only using dvc through the API, right? In that case, a quick workaround would be to simply monkeypatch `Repo.check_modified_graph` with a `noop`. E.g.

```
repo = Repo(".")
repo.check_modified_graph = lambda *args: None
repo.add("something")
repo.checkout("other.dvc")
```

I need to stress that this is pretty dangerous, as your new `repo.add()` might be overlapping with some existing dvc-file (another dvc-file that has the same file you are adding listed as an output) and you might get unexpected results from `repo.checkout()` (no target).

A proper solution would be to try to optimize this by, for example, caching the results of stage collection in particular directories based on their mtime, and doing some other related things. But that would only work if most of your directories are not being constantly updated, of course. Could you please talk a bit more about the scenario you have? What are the requirements? How long of an execution time for `check_modified_graph` would be acceptable for you? How wide and how deep of a file tree do you have? Are dvc-files scattered all over that tree, or do you have large chunks that don't have them?
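For illustration, a rough sketch of the mtime-based caching idea mentioned above. All names here are hypothetical, not DVC's actual implementation; the key point is that a `(filename, mtime, size)` key lets unchanged dvc-files skip re-parsing entirely:

```python
import os
import pickle

class StageCache:
    """Illustrative (filename, mtime, size)-keyed cache of parsed stages."""

    def __init__(self, path=".dvc/stage.cache"):
        self.path = path
        try:
            with open(path, "rb") as f:
                self._cache = pickle.load(f)
        except (OSError, EOFError, pickle.UnpicklingError):
            self._cache = {}  # empty or corrupt cache: start fresh

    def load(self, dvcfile, parse):
        st = os.stat(dvcfile)
        key = (dvcfile, st.st_mtime, st.st_size)
        if key not in self._cache:
            # only re-parse when the file's mtime/size changed
            self._cache[key] = parse(dvcfile)
        return self._cache[key]

    def save(self):
        with open(self.path, "wb") as f:
            pickle.dump(self._cache, f)
```

As noted later in the thread, keying on file contents instead of mtime/size also guards against manual edits, at the cost of reading every file.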
Though, thinking about it, operations like add/fetch/checkout/etc. that are related to the data management part of dvc shouldn't really care about DAG relations. At most, they could check that there are no overlapping outputs when you are doing things like `dvc checkout`, since that would create a race condition (but for this we don't have to collect the dag for the whole project). This is a really interesting thing to consider. Need to think about it a bit more, but for now, it does look promising. 🤔

For example, say you have `big.dvc` and `small.dvc` that both point to `data`. Then if we lift this current restriction, we would be able to do neat things like

```
dvc checkout big.dvc
./myscript.sh data
dvc checkout small.dvc
./myscript.sh data
```

(note that this also effectively replaces a currently hidden `dvc tag` feature)

but when you try to

```
dvc checkout
```

it should probably raise an error, because `small.dvc` and `big.dvc` overlap, so the content of `data` would depend on the order of the underlying linking.
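A minimal sketch of the overlap check described above, which needs only the targeted stages' output paths rather than the full project graph (names are illustrative):

```python
import os
from itertools import combinations

def outputs_overlap(paths):
    """True if any two output paths collide or nest (checkout race)."""
    normed = [os.path.abspath(p) for p in paths]
    for a, b in combinations(normed, 2):
        # equal paths, or one path contained inside the other
        if a == b or a.startswith(b + os.sep) or b.startswith(a + os.sep):
            return True
    return False

# e.g. big.dvc and small.dvc both producing "data" would trip this check
assert outputs_overlap(["data", "data"])
assert outputs_overlap(["data", "data/sub/file"])
assert not outputs_overlap(["data1", "data2"])
```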
Currently we really only use the API, but that is partially because a lot of the cli primitives are so painfully slow. Some simple things would be nice to do without having to drop into python and script it.

The bulk of our directory structure is organized by "scene", which is a logical unit of video and image data corresponding to a point in time. There are 100s of scenes and each scene has a few dozen files associated with it; oftentimes data associated with a scene will be associated with a particular model, and that file type across many scenes is updated at once. This workflow is managed by our API, but sometimes we want to change a typo in a single file, where the workflow should just be `dvc add <file>` or `dvc checkout <file>`. If we know the specific file being added or checked out, I don't think these DAG checks are buying us anything.

> Currently we really only use the API, but that is partially because a lot of the cli primitives are so painfully slow. Some simple things would be nice to do without having to drop into python and script it.

@danfischetti Were you able to pinpoint the parts that are slow for you there compared to the API? Also, have you tried the 0.65.0 CLI? It should be pretty similar to the API these days. We've improved the startup time in the recent versions quite significantly, so the gap between CLI and API should've shrunk.

> The bulk of our directory structure is organized by "scene" ...

Thanks for clarifying! Would that monkeypatch workaround be suitable for you for now? In the meantime, we'll consider the idea from https://github.com/iterative/dvc/issues/2671#issuecomment-546518272, as it has the potential to be a simple, effective and, most of all, correct solution for all of us. 🙂

Thank you so much for the great feedback! 🙂

I have tried the 0.65.0 cli; the reason it's slower than the API is that we're skipping the high-level "add" operation and are manually creating and saving Stage objects. Otherwise the api would be just as slow due to the `collect_stages` call.

Yes, I think the idea mentioned in that comment would work. DAG checks are totally appropriate when doing a top-level checkout; only skipping them when a specific file is requested would suit our needs.

Discussed this offline with @efiop and @dmpetrov, and 1-1 with @shcheklein, and there is no consensus on lifting DAG checks even for the `dvc add` command. The core consideration is that people should be able to:

```bash
git clone ... && dvc pull
```

and continue their or their teammates' work without any complications. Ultimately we want both correctness (as it is enforced now) and performance.

I'll do some research on ways to optimize or cache this.

So here is my benching so far (21k add stages):

| task | time |
| ------------------- | ----------- |
| list | 0.65s |
| list + mtime/size | 0.85s |
| list + read | 1.16s |
| parse yamls | 48.5s |
| create stages | 69.6s |
| stages (no schema) | 59.0s |
| build graph | 69.6s |

The majority of the time is taken by 2 things:
1. YAML parsing (48.5s)
2. Schema validation (10.5s)

The rest is split between path manipulations, deepcopies, and outs and deps creation mostly.

@Suor thanks, great summary! We need to get rid of effectively all of the last 4 to make it usable.

Switching from `ruamel.yaml` back to `PyYAML` cuts parsing time in half - 24.6s instead of 48.5s. But that doesn't preserve comments, so stages can't be safely dumped.

@shcheklein if we cache stages then building the graph is not the issue. The issues that remain:
- still slow on an empty cache
- need to make the cache cross-python, cross-dvc

@Suor cache might be a solution. But it still takes time to build it. We'll need to do checks to ensure that it's still valid in case someone manually changes a DVC-file. We'll have to think about things like atomicity, etc.

@shcheklein we can cache either by the `(filename, mtime, size)` tuple or even by file contents (reading which is fast enough), so someone manually changing a DVC file is not an issue.

Another thing I see is that we are using the pure-python yaml parser. PyYAML somewhat supports wrapping libyaml, which should speed things up. Here is how you install it though:

```bash
python setup.py --with-libyaml install
```

So no luck with using it as a dep)

> manually changing

is only one problem in supporting a cache; there will be trickier ones. So a cache might be a solution but unfortunately quite a complicated one.

So the update on yaml libs:

| library | time |
| -----------------|------ |
| ruamel.yaml | 48.5s |
| PyYAML | 25.6s |
| PyYAML (libyaml) | 3.9s |

To use PyYAML with libyaml on debian-based linuxes:

```bash
sudo apt install libyaml-dev
pip install PyYAML
```

So that is achievable via deps. We might want to use this strategy:
- parse stages with `PyYAML` on read, store the unparsed text
- if we need to dump the stage, then parse the text with `ruamel.yaml`, apply the diff and dump_file

This way we'll make it faster for most scenarios without caching. Rewriting all the stages looks like a rare scenario.

Not sure we can do anything about validation besides caching. This is a single call:

```python
Schema(Stage.SCHEMA).validate(convert_to_unicode(d))
```

So the only thing we can do besides caching is replacing the validation lib altogether.
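For reference, the libyaml speedup above comes down to choosing the C-backed loader when PyYAML was built against libyaml; the usual pattern is a try/except fallback:

```python
import yaml

try:
    # only available when PyYAML was compiled against libyaml
    from yaml import CSafeLoader as SafeLoader
except ImportError:
    from yaml import SafeLoader  # pure-python fallback

def parse_stage_text(text):
    return yaml.load(text, Loader=SafeLoader)
```

With the fallback in place, the dependency stays plain `PyYAML`; users who have `libyaml-dev` installed simply get the faster loader for free.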
I don't see how this specific optimization solves the problem, @Suor. But it definitely complicates all the logic and most likely the packaging for different platforms.

It solves `(48.5 - 3.9) / 69.7 ~ 64%` of the problem, even with an empty cache. It will be about 33% of the problem without the libyaml C lib. Both the logic and packaging complications will be quite limited.

What do you suggest? Implementing cache only?

> 64% of the problem

so, it complicates everything but does not solve the problem

> What do you suggest?

add an option for people who manage a large number of DVC-files to disable the check for now. There should not be a penalty for them, and we should unblock the workflow. Also, it will give us more information, namely whether there is a potential problem in not performing this check. It should run in < 1s. Then, see what we can do - allow this setup in general and/or use a cache or something else.

Benched using [voluptuous](https://github.com/alecthomas/voluptuous) instead of [schema](https://pypi.org/project/schema/) for validation; it works about 13x faster (it precompiles the schema into a python function). This will strip another 14% off the problem, making it 8x faster combined with the libyaml/PyYAML thing. There are other possible optimizations there.

> so, it complicates everything but does not solve the problem

It's not that black and white. Making it faster will benefit everyone, and will make it under 1s for whoever it is now under 8s for, so the problem will be solved for them.

Skipping the check is not that rosy either:
- if you still need to load many stages, it will remain slow
- all the other ops besides `dvc add` will remain slow
- you'll need to avoid any code constructing the graph accidentally (this includes future code)
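For reference, the voluptuous pattern benchmarked above looks roughly like this. The schema shown is a toy, not DVC's actual `Stage.SCHEMA`; the point is that a `Schema` object is compiled once and then called directly:

```python
from voluptuous import MultipleInvalid, Required, Schema

# compiled once at import time, unlike schema's per-call interpretation
STAGE_SCHEMA = Schema({
    Required("cmd"): str,
    "deps": [str],
    "outs": [str],
})

def validate_stage(d):
    try:
        return STAGE_SCHEMA(d)  # Schema objects are callable
    except MultipleInvalid as exc:
        raise ValueError(f"invalid stage file: {exc}") from exc

validate_stage({"cmd": "python train.py", "deps": ["data.csv"]})
```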
> It's not that black and white.

I think this specific ticket and use case are black and white indeed. At least, I don't see how the suggested optimizations can help. It will be a few minutes to just add a file, right?

It's a good question whether there are other cases with thousands of DVC-files and what the requirements are there. It would answer the question of whether we need to do a "middle ground" optimization with some potential complications in supporting this.

> Skipping check is not that rosy either.

not saying that this is the solution I like; I just don't see how else to unblock the workflow for the team and save us some time to come up with a better one if possible.

> It will be a few minutes to just add a file, right?

For me it is 1 minute now, before optimizations, so it's like 8 sec after optimizations; I have 21k files. It's much longer for @danfischetti. It took 113s for 5764 stages there; they are probably more complicated than mine.

@danfischetti can you copy-paste a yaml of a typical stage of yours here? Also, what's your directory structure? Are dvc files spread over the tree?

Btw, guys, how about we start with simply making `dvc checkout some.dvc` not collect all stages? The reason it does that in the first place is that it is trying to clean up old unused links that we no longer have dvc files pointing to, which actually only makes sense for `dvc checkout` (without arguments). That should speed up `dvc checkout` for specific targets by a lot. With `dvc add` it is not that simple, as we are creating a new dvc stage there, which has risks of colliding with other stages, so we will have to optimize DAG collection there :slightly_frowning_face:

For the record: created https://github.com/iterative/dvc/pull/2750 to disable DAG checks for `dvc checkout` with specific targets.

So after all the optimizations merged, we have 8.8s instead of 69.6s to collect the graph for 21k simple stages - a 7.9x speedup. Here is what takes time:

| task | time | | what is added to prev line |
|----------------|------|-------|-------------------------------------|
| list + read | 1.1s | | (includes startup time) |
| ... + parse | 3.1s | +2.0s | PyYAML/libyaml parsing |
| ... + validate | 4.5s | +1.2s | validation/coercion with voluptuous |
| collect stages | 7.2s | +2.7s | stage/dep/out object creation |
| check stages | 8.2s | +1.0s | check dups, overlaps, etc |
| collect graph | 8.8s | +0.6s | graph creation (incl. import nx and cycle check) |

I would say even if we cache the graph we can get at best 1.5s; if we cache stages individually - 2.5s.

Since the issue is not urgent for the topic starter, I suggest stopping with this for now.

@danfischetti could you please give it a try? Does it solve the problem with your repo?

Closing due to inactivity.

I have also encountered this issue.

dvc version

DVC version: 0.82.8
Python version: 3.7.5
Platform: Darwin-18.7.0-x86_64-i386-64bit
Binary: True
Package: osxpkg
Filesystem type (workspace): ('apfs', '/dev/disk1s1')

For the record: @tushar-dadlani is running into `dvc pull something.dvc` collecting all of the stages for too long, without any progress bars or anything. We need to at least add a progress bar, but should also consider not collecting stages when we are given a specific stage as a target already.

@efiop we can also consider some further optimizations of stage collection. We stopped this because the initial user was non-responsive.

The issue is that we need to cling to the real problem better, as in my test scenario it's generally ok.

@tushar-dadlani the thing is, this works well for my test scenario. I may try to invent new ones, but it would make much more sense to look at your case. Can you provide a cut-up anonymized copy of your repo:

```bash
cd <repo>
mkdir ../repo-copy; cp -r * ../repo-copy  # skipping .dvc and .git here

cd ../repo-copy
find . -type f -not -name \*.dvc -exec sh -c 'echo ERASED > {}' \;

cd ..
tar czvf repo.tar.gz repo-copy/; rm -rf repo-copy
```

Then attach `repo.tar.gz` here or to the slack channel. This will replicate all the dir/file structure as well as stages and pipelines, which should be enough to reproduce and optimize it.

> ```shell
> find . -type f -not -name \*.dvc -exec sh -c 'echo ERASED > {}' \;
> ```

This line should have some warning, as if it is run in a different location compared to your assumed location, it can be disastrous.
Great point @tushar-dadlani! Thanks for providing the test repo 🙏 We are able to reproduce the problem; looking into it right now.

So my implementation using tries gives:

```
Tries:
    654.77 ms in collect stages
     26.63 ms in dups/overlaps
     19.35 ms in stages in outs
    188.29 ms in build graph
    370.05 ms in check check_acyclic
      1.45 s in _collect_graph(Repo: '/home/suor/proj...)

Old code:
    650.43 ms in collect stages
     27.48 ms in dups/overlaps
     35.23 ms in stages in outs
      3.02 s in build graph
    400.53 ms in check acyclic
      4.33 s in _collect_graph(Repo: '/home/suor/proj...)
```

for 1320 stages. Will test on @tushar-dadlani's data and create a PR.

This is what I have for @tushar-dadlani's repo:

```
 26.49 s in collect stages
  1.32 s in dups/overlaps
782.49 ms in stages in outs
  7.23 s in build graph
 18.80 s in check acyclic
 54.83 s in _collect_graph(Repo: '/home/suor/proj...)
```

Build graph is not the biggest part anymore. The only way to make it fast is probably caching. Making some commands avoid building a graph and doing a full collection is also a good idea.

Old code:

```
 25.61 s in collect stages
  1.72 s in dups/overlaps
  1.74 s in stages in outs
^C 2997.81 s in build graph  # interrupted
3027.11 s in _collect_graph(Repo: '/home/suor/proj...)
```

---

Not only commit/add; an action like `dvc move` would not take desc/remote into account either. It seems we cannot add remote and desc to the dvc file currently.
@skshetry how about updating the doc at https://dvc.org/doc/user-guide/project-structure/dvc-files#output-entries

We try to preserve the data for meta and desc here, but we did not do that for `remote`:

https://github.com/iterative/dvc/blob/30d6839a5f471fb02a7a2dcbd6a6616b71277762/dvc/stage/__init__.py#L116-L119

The `remote` can be accessed from `out.remote`, and can be set on the new stage that we are creating, as we do for `desc`.

For the `move`, it's going to be a bit more complicated, but it involves the same thing that we do above.

Hey DVC team! Wondering whether there is an ETA on this issue or, if not, whether I can take a stab at a fix? I am working with a setup where I require per-dvc-file remote outputs, and this bug is making it infeasible to update (and push) ml model files to dvc remotes when conducting model training on Gitlab runners.
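A minimal sketch of the fix direction described above. The `desc`/`remote`/`def_path` attribute names follow the comment; everything else is hypothetical glue, not DVC's actual code:

```python
def carry_over_out_metadata(old_stage, new_stage):
    """Copy user-set output fields from the old stage onto the new one."""
    # index old outputs by their path so matching is straightforward
    old = {out.def_path: out for out in old_stage.outs}
    for out in new_stage.outs:
        prev = old.get(out.def_path)
        if prev is not None:
            out.desc = prev.desc
            out.remote = prev.remote  # `remote` was being dropped, per the discussion
```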
---

Since we will be releasing 1.0 soon, we might even do that in a non-backward-compatible way to simplify the logic.

---

Can be reproduced in master with

```py
def test_ignore_blank_line(tmp_dir, dvc, monkeypatch):
    tmp_dir.gen({"dir": {"ignored": "text", "other": "text2"}})
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "dir/ignored\n\nfoo")

    assert _files_set("dir", dvc.tree) == {"dir/other"}

    monkeypatch.chdir("dir")
    assert _files_set(".", dvc.tree) == {"./other"}
```

---

Hi @art049,
Yes, you're right. Since #1971 has been solved, this is the expected behaviour, but it seems we update only the current field and not all of them. I can make a quick fix if you want; it should take a couple of minutes.

---

> Make sure only the table is presented in STDOUT. All other messages (like "No changes.") should be in STDERR.

Not sure why it should be like that. The table as well as `No changes.` are for the user, not really meant to be parsable. For parsing we have `--show-json`. I don't think we do that anywhere else in dvc.

I'd prefer to have empty output in this case. No need for a special message. It is the usual approach for many commands, including `git diff`.

@dmpetrov But we've been talking about dvc commands being too silent, hence why we've introduced such summary messages everywhere. `No changes` is consistent here with the rest of the commands.

Don't have a strong opinion on this (`git diff` does not show anything, right? but on the other hand it outputs a patch by default and DVC outputs a human-readable table - so not a direct comparison for sure).

It's better to be consistent with the other DVC "diff" commands though.

@dmpetrov noticed that it makes it impossible to use `dvc metrics diff` in a shell like `[ -z "$(dvc metrics diff HEAD^)" ]`, which is bad. Moving to stderr makes total sense to me now.

Also, probably time to use `print` there instead of `logger.info`...

@efiop do we have the same problem (not sure though why --json could not be used, for example, in the same bash check) for other commands? Again, just to make sure that we are more or less consistent.

All the "information" commands need to output nothing if there is no information. This way you can easily use them from scripts like

```
$ git diff
$ if [[ -z $(git diff) ]]; then
    # do something
fi
```

Otherwise, we are pushing people to parse the outputs - `if [[ "$(dvc metrics diff)" = "No changes" ]]`. A clear anti-pattern.

For "action" commands (not "information") like `dvc checkout` it is fine (and usually preferable) to output human-readable information.

Would you consider `git status` an information or action command? :)

I really think that the reason behind `git diff`'s behavior is that it's not meant to be consumed by a human, even if there are some changes in the first place. `git diff` with its default options is similar to `dvc diff --json`.

---

Context: https://discordapp.com/channels/485586884165107732/485596304961962003/760236253496213515

Probable cause: the path to the file is `/home/data/cana/ds30/cana-mucuna/class35_e2545053-f2c5-4108-9042-67244a94e267_p_['cana']_o_['cana', 'mucuna'].jpg` (it includes combinations of special characters like `[`, `'`, `]`, `,`, and ` `), which the file system supports via the terminal as well as `ssh` and `scp`, but `paramiko` doesn't support it. See https://github.com/paramiko/paramiko/issues/583

@jorgeorpinel looks like paramiko/paramiko#583 is about `exec_command`; is it still relevant in this case?

(I'm asking mostly to see if we need to create a ticket on the paramiko side in advance, if we are sure this is paramiko's issue - it takes time to resolve them.)

---

It seems like write mode should just not be supported at all here. If you are writing to a file associated with a specific git revision, wouldn't this require rewriting git history?

Actually, is write mode even something that should be supported in `api.open` at all (with or without rev)?

If anything, this sounds more like a `dvcx` scenario, where a write actually requires:
- modify the file
- `dvc commit` the changes if it's a DVC-tracked file
- `git add + git commit` the changes to the relevant files

Yeah, it shouldn't be supported. Going through too many issues 😄. It would be nice to have a better error message and maybe document when write mode isn't supported.
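For the better error message, something like this hypothetical guard (names are illustrative, not `api.open`'s actual internals) would reject write modes up front:

```python
def _check_mode(mode: str) -> str:
    # a file read at a given git revision is read-only by construction;
    # any write would amount to rewriting history, so reject it early
    if set(mode) & set("wax+"):
        raise ValueError(
            f"mode {mode!r} is not supported: only reading is allowed here"
        )
    return mode

_check_mode("r")    # ok
_check_mode("rb")   # ok
# _check_mode("w")  would raise ValueError with a clear message
```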
Do we only want to pass the error message to the user? Or will there be some logic involved with processing the errors on `vs-code` side?\nWe'll be processing the errors before displaying anything to the user. Would be good to have a way to identify where certain revisions are missing data due to errors (as in the example provided in https://github.com/iterative/dvc/issues/7691).\n@pared Any progress on this one?\nNo, but I believe we could include it as a part of implementing iterative/vscode-dvc#1757\n@pared Any updates on looking into this one?\nI consider it as a part of aforementioned issue on `vscode` - but the estimation for vscode depends on research on studio side. It is not yet finished.\nNote to self: since returning errors will probably require data structure change, we need to remember to get rid of filling `rev` in datapoints - as vscode sometimes need to assign their own revision (eg 'main' vs short sha of main).\nI didn't left any comment during research, so:\nWe were able to implement top level plots basing on old data format. In order to support errors we will need to change the data structure returned by `dvc plots ... --json`.\n@mattseddon could you please share the location of the code that parses the `--json` result for plots on our end.\r\n\r\n@dberenbaum do we know if anyone else besides vs code depends on `--json`?\r\n\r\n- [ ] ⌛ A bit of research. JSON structure looks extremely suboptimal (tons of duplication), since we are changing it, I'd like to have a bit better understanding of how it's being used.\r\n- [ ] Another question to answer and agree on how we process directories (if the whole plot dir can't be expanded and we don't know what files it has we can't send an error per file then, we'll have to send it per directory)\r\n- [ ] Change the output\r\n\nUse https://github.com/iterative/vscode-dvc/blob/main/extension/src/plots/model/index.ts#L108 as an entry point.\n\"Screen\r\n\r\nI see that data collection depends on the `datapoints` field and is not using `data` in the content: \r\n\r\nhttps://github.com/iterative/vscode-dvc/blob/main/extension/src/plots/model/collect.ts#L372\r\nhttps://github.com/iterative/vscode-dvc/blob/main/extension/src/plots/model/collect.ts#L423\r\n\r\n@mattseddon do you remember from the top of your head if we need `data` in the template, it looks identical (at least in the sample I have), do we need it in VS Code? And why did we decide to keep both (e.g. why don't we parse `plot.content.data` instead of `datapoints`).\r\n\r\nAre there some proposals, tickets, PRs for the plots JSON format?\r\n\nI do not think that we need it.\n> Are there some proposals, tickets, PRs for the plots JSON format?\r\n\r\nThe original PR is here: https://github.com/iterative/dvc/pull/7367. From reading the description it looks like the data being duplicated is a bug for the `--split` flag.\n> @dberenbaum do we know if anyone else besides vs code depends on `--json`?\r\n\r\nNo, I don't think so.\r\n\r\nFor the duplicated data, I'm missing something because I have different output from what @shcheklein shows above. I don't see all the data in `content.data.values`. 
My output for `dvc plots diff 504206e f586d67 workspace -o .dvc/tmp/plots --split --json` looks like this:\r\n\r\n```yaml\r\n{\r\n \"dvc.yaml::Accuracy\": [\r\n {\r\n \"type\": \"vega\",\r\n \"revisions\": [\r\n \"504206e\",\r\n \"f586d67\",\r\n \"workspace\"\r\n ],\r\n \"content\": {\r\n \"$schema\": \"https://vega.github.io/schema/vega-lite/v5.json\",\r\n \"data\": {\r\n \"values\": \"\" # Nothing else shows up in this field.\r\n },\r\n...\r\n```\n@dberenbaum my bad, I didn't use `split` I think. I wonder what's the purpose of `datapoints` in the non-split mode then? (not critical I think at all, since JSON is not used anywhere now).\nUpdated the description - some examples of the current output. Next - try to add an error for an image plot (not the directory-with-images case for now) (`example-get-started`'s `importance.png`).\nIn general, I find returning errors to be a mistake. It increases a lot of maintenance burden, for which we are not ready internally.\n@skshetry it's a bad user experience to not show anything at all in case something fails. I think if it's done right it won't be a big burden at all, and the code doesn't have to be complicated. We already have this data, we just need to propagate it (I think so at least, I can be wrong). And to clarify, we don't talk here about processing specific types of errors; we just need a signal that a plot exists in a revision and that it can't be loaded for some reason.\r\n\r\nOn the maintenance side - I think the whole plots logic and the related index part should be the first thing to improve. E.g. after the last refactoring we still have two plot accessors (`_plots` and `plots`), still some custom collect logic, a lot of logic with path manipulations, old code (like output plots, etc.) - those are points that should be removed, refactored, etc. to make it lighter and simpler.\n> We already have this data, we just need to propagate it\r\n\r\nThat's where the complexity is, right? It's easy to log or suppress but extremely hard to propagate up. We need small sets of APIs at the high level where we do this. At the moment we are spreading this logic to deep layers, which increases the burden.\r\n\r\nI think there should be a symmetry between the product and the engineering side, and here I think the expectation on the product side is too high (or, was too high). :)\n> That's where the complexity is, right? \r\n\r\nDoesn't have to be. Sometimes dropping some code (that removes and / or transforms things) instead of exposing them directly (which might be just fine in this case) can simplify it. We'll see how it goes. I definitely want to avoid adding tons of custom code for this.\r\n\r\n> I think there should be a symmetry between the product and the engineering side, and here I think the expectation on the product side is too high (or, was too high). :)\r\n\r\nI think it's a wrong dichotomy in this case. I'm not sure if it's possible to do it now w/o complicating things. It definitely doesn't add much complexity to do this from scratch. If we had the standard in mind (it's not high at all), we would have spent some small additional percent of time.\r\n\r\nProduct expectation - we talk about VS Code, right (that's what I have in mind in the first place), not DVC? Just in case. I'm fine (more or less) with DVC returning a generic error (and writing something in logs). In VS Code it leads to a bad experience. It's not top priority (that's why I'm doing this in the background), but it can and should be fixed. (One possible error-aware shape is sketched below.)
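Purely as an illustration of the error-aware shape mentioned above (hypothetical field names, not DVC's actual `--json` format): each plot maps revisions either to datapoints or to an error record, so a consumer can tell "absent" apart from "failed to load".

```python
payload = {
    "dvc.yaml::Accuracy": {
        "workspace": {"datapoints": [{"step": 0, "acc": 0.91}], "error": None},
        "f586d67": {
            "datapoints": None,
            "error": {"type": "FileNotFoundError",
                      "msg": "plots/acc.tsv is missing in this revision"},
        },
    },
}

# A consumer (e.g. the VS Code extension) can then render a per-revision
# error signal instead of silently dropping the revision.
for plot, revs in payload.items():
    for rev, result in revs.items():
        if result["error"]:
            print(f"{plot} @ {rev}: could not load ({result['error']['msg']})")
```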
And we should have a higher standard for our products.\nFor visibility: got distracted by some other plots issues (broken smooth templates, new DVCLive release) and didn't have capacity for this hands-on work (which is not a lot of time by default). I'll try to get back to this asap.\r\n\r\nSome design decisions are tricky here. If we have a plot directory, we expand each file in that directory as its own plot when we return the result. It's fine. The problem is that we don't know the layout if we can't download the `.dir` in the first place. So, for these granular plots we can't communicate errors at all - we don't know for sure if they exist or not in the directory in the failed revision. We'll have to assume that they don't, I guess, + communicate that we were not able to process the whole directory.\r\n\r\n\n@shcheklein Can you clarify the full scope of the issue? Is it only about plot directories, or is that merely one case you are trying to solve for?\nYes, @dberenbaum . It's related to these issues - https://github.com/iterative/vscode-dvc/issues/2277 and https://github.com/iterative/vscode-dvc/issues/1649 in the VS Code repo. Very high level - we need to distinguish absent plots from errors and show some signal to users vs silently ignoring things and/or showing misleading messages (a refresh button when there is nothing to refresh in an experiment).\r\n\r\n> Can you clarify the full scope of the issue? Is it only about plot directories, or is that merely one case you are trying to solve for?\r\n\r\nThe full scope: show error messages for all plot definitions, not only directories / images. \n@skshetry Can you follow up with questions you have, and @shcheklein and I can respond to define the scope better? By next week when you are finished with support duty, let's try to have a solid plan and estimate 🙏 .\nI could not look into this during support duty, as some p0s/bugs came up.\nMore related issues:\r\n* https://github.com/iterative/vscode-dvc/issues/3222\r\n* #7887\nWe do seem to preserve errors during `plots.collect()`. We transform the internal representation into the JSON format, where we lose most of the information. We could start with exposing that; what would be a good JSON format for incorporating errors for vscode?\n> We could start with exposing that; what would be a good JSON format for incorporating errors for vscode?\r\n\r\n@skshetry 🤔 tbh I don't think VS Code requires anything specific here. We should come up with a decent general format for this data. We can adjust VS Code if needed. \nI think what we've learned is that it's helpful to share drafts early and often to get feedback as you go, so we know mostly what works in both products by the time we are ready to merge.":1,"As a workaround, maybe you could set the attribute type to a set? Then when a list gets passed in, it gets deduplicated automatically.
Then you can override `json()` and `dict()` to convert it back to a list on the way out.\n@mdavis-xyz The idea of this feature is to validate that sequences have only unique items.\r\n\r\nCode below will show the idea:\r\n```python\r\nfrom typing import Set\r\n\r\nfrom pydantic import BaseModel, Field\r\n\r\n\r\nclass FooForm(BaseModel):\r\n tags: Set[int]\r\n\r\n\r\nfoo = FooForm(tags=[1, 1, 2]) # tags attribute will be {1, 2}\r\n\r\n\r\nclass BarForm(BaseModel):\r\n tags: Set[int] = Field(unique=True)\r\n\r\n\r\nbar = BarForm(tags=[1, 1, 2]) # will fail, because sequence has duplicate items\r\n```\r\n\r\nIf you have any questions, feel free to ask.\nThis is an interesting issue that clarifies some of the friction points with validation and casting within Pydantic. \r\n\r\nAs a workaround, it's possible to add a validator with 'pre=True' to get the desired behavior. \r\n\r\n```python\r\nfrom typing import Set, Sequence\r\nfrom pydantic import BaseModel, Field, validator\r\n\r\ndef custom_to_set(xs: Sequence[int]) -> Set[int]:\r\n items = set([])\r\n for item in xs:\r\n if item in items:\r\n raise ValueError(f\"Duplicate item {item}\")\r\n else:\r\n items.add(item)\r\n return items\r\n\r\n\r\nclass Record(BaseModel):\r\n tags: Set[int] = Field(...)\r\n\r\n @validator('tags', pre=True)\r\n def validate_unique_tags(cls, value):\r\n # raise ValueError or a set\r\n return custom_to_set(value)\r\n\r\ndef example() -> int:\r\n\r\n t1 = ['1', '2'] # Notice how liberal Pydantic is. This is not a validation error\r\n t2 = [1, 3, 1] # This now raises\r\n for t in [t1, t2]:\r\n print(Record(tags=t))\r\n return 0\r\n```\r\n\r\n\r\nIs this issue a bandaid for the fundamental loose casting/coercion model that Pydantic has adopted?\r\n\r\nhttps://github.com/samuelcolvin/pydantic/issues/1098\r\n\r\nYour proposal does seem reasonable and clarifies the default behavior of `Set[T]` in a \"strict\" mode should raise a core validation error if the input sequence has any duplicates. However, this is not how Pydantic has approached validation. Perhaps it would be better to wait until Pydantic has a coherent \"strict\" model?\r\n\r\nWith regards to the proposal, it's a bit counterintuitive to define a `Set[int]` and also set `Field(unique=True)` to get the validation to work \"correctly\". Similarly, `List[T] = Field(unique=True)` is counterintuitive because it's not clear why a `Set[T]` isn't being used. This case might be better solved by a custom validation/casting function with `pre=True`? \r\n\r\n\n@mpkocher This issue came up when I faced with an issue which I thought was a bug.\r\n\r\nI had a model and one of the fields had a type `Set[str]`, the model looked like:\r\n```python\r\nclass Form(BaseModel):\r\n tags: Set[int]\r\n```\r\nWhen one of the scripts tried to instantiate `Form` with duplicated items it simply cast sequence to a set, which was the wrong behavior for me script. After investigating I found that this is expected behavior, that why I create this PR)\r\n\r\nI agree with you that `List[T] = Field(unique=True)` can be a little bit confusing.\r\n\r\nRegarding #1098 it looks great and maybe it's better to return to this feature after `Strict Configuration` will be implemented.\nI'm running into similar friction points where the overly liberal casting yields surprising results. 
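If the goal is a field that stays a list but rejects duplicates, a plain validator gets there today without the proposed `Field(unique=True)` (a sketch against pydantic v1):

```python
from typing import List
from pydantic import BaseModel, validator

class Form(BaseModel):
    tags: List[int]

    @validator("tags")
    def tags_must_be_unique(cls, v):
        # Order is preserved; inputs containing repeats are rejected outright
        # instead of being silently deduplicated the way Set[int] would.
        if len(set(v)) != len(v):
            raise ValueError("tags must be unique")
        return v

print(Form(tags=[1, 2, 3]).tags)  # [1, 2, 3]
# Form(tags=[1, 1, 2])            # raises ValidationError
```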
":1,"> Would it be possible?\r\n\r\nThe internal API already supports multiple `revs`.\r\n\r\nThis should be a simple change to make for the CLI (TBH I thought the CLI already supported it)":1,"@rxxg I am unabe to reproduce the issue, \r\ncan I ask you to run following scirpt?\r\n```\r\nrmdir /s repo\r\nmkdir repo\r\npushd repo\r\ngit init --quiet\r\ndvc init -q\r\ndvc config cache.type hardlink\r\ndvc config cache.protected true\r\ngit commit -am \"init dvc\"\r\nfsutil file createnew data 10485760\r\ndvc add data\r\ngit add .gitignore data.dvc\r\ngit commit -am \"add data\"\r\nfsutil hardlink list data\r\ndvc unprotect data\r\nfsutil hardlink list data\r\necho hello >> data\r\ndvc status\r\npopd\r\n```\r\nDoes the status display corrupted cache WARNING?\r\n\r\n[EDIT]\r\nAlso, can I ask you to provide output of `dvc version` command? (note that its without `--`)\r\n\r\n[EDIT2]\r\nSorry, forgot its NFS drive, let me try to reproduce that again.\n@rxxg Also, as a temporary workaround you can change cache type to copy (`dvc config cache.type copy`) and use `dvc checkout --relink big_file.dvc`.\n@pared Thanks for looking at this. Yes, the use of a network drive (repo and cache) is essential.\r\n\r\n(I had been using copy cache but it is very slow for our use case since the copy that DVC does involves reading then writing to the network drive in 16k chunks. Native Windows copy is of the order of 3 seconds for a 128Mb file, 30 seconds for shutil.copyfileobj. There may be a separate bug report or PR for this.)\nFor the record:\r\n\r\n```\r\nQ:\\repo> git init --quiet\r\nQ:\\repo> dvc init -q\r\nQ:\\repo> dvc config cache.type hardlink\r\nWARNING: You have changed the 'cache.type' option. This doesn't update any existing workspace file links, but it can be done with:\r\n dvc checkout --relink\r\nQ:\\repo> dvc config cache.protected true\r\nQ:\\repo> git commit -am \"init dvc\"\r\n[master (root-commit) 7e8ba77] init dvc\r\n 2 files changed, 12 insertions(+)\r\n create mode 100644 .dvc/.gitignore\r\n create mode 100644 .dvc/config\r\nQ:\\repo> fsutil file createnew data 10485760\r\nFile Q:\\repo\\data is created\r\nQ:\\repo> dvc add data\r\n100% Add|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████|1.00/1.00 [00:01<00:00, 1.48s/file]\r\n\r\nTo track the changes with git, run:\r\n\r\n git add .gitignore data.dvc\r\nQ:\\repo> git add .gitignore data.dvc\r\nQ:\\repo> git commit -am \"add data\"\r\n[master d7e862f] add data\r\n 2 files changed, 8 insertions(+)\r\n create mode 100644 .gitignore\r\n create mode 100644 data.dvc\r\nQ:\\repo> fsutil hardlink list data\r\nError: The request is not supported.\r\nQ:\\repo> dvc unprotect data\r\nQ:\\repo> fsutil hardlink list data\r\nError: The request is not supported.\r\nQ:\\repo> echo hello >> data\r\nQ:\\repo> dvc status\r\nWARNING: corrupted cache file '.dvc\\cache\\f1\\c9645dbc14efddc7d8a322685f26eb'.\r\ndata.dvc:\r\n changed outs:\r\n not in cache: data\r\nQ:\\repo> dvc version\r\nDVC version: 0.80.0\r\nPython version: 3.7.1\r\nPlatform: Windows-10-10.0.16299-SP0\r\nBinary: False\r\nPackage: pip\r\nCache: reflink - False, hardlink - True, symlink - False\r\n```\n@rxxg Ok, thank you very much, I am trying to reproduce it on my machine.\n@rxxg Could you please install psutil with `pip install psutil` and then run `dvc version` again and show us the output?\r\n\r\nSide note for us: need to improve the way `dvc version` tests for link types by doing additional check 
for the created links. E.g. create hardlink and then do a sanity check with `System.is_hardlink`.\nSure.\r\n```\r\nDVC version: 0.80.0\r\nPython version: 3.7.1\r\nPlatform: Windows-10-10.0.16299-SP0\r\nBinary: False\r\nPackage: pip\r\nCache: reflink - False, hardlink - True, symlink - False\r\nFilesystem type (cache directory): ('NTFS', 'Q:\\\\')\r\nFilesystem type (workspace): ('NTFS', 'Q:\\\\')\r\n```\n@rxxg I see that it is reporting NTFS, but you were saying you are on NFS. Was it a typo or am I missing something?\n@rxxg Btw, is it your work machine or your personal one? We've seen something similar on NTFS in https://github.com/iterative/dvc/issues/2831 , but weren't able to find the cause for such a strange FS behavior at that time.\nSorry, typo 😳 Windows NTFS network share\r\nIt's my work machine, so I have zero control over the servers and even finding out info about the hardware/network protocol is hard work.\r\nI had locking failures which came from the same cause (#2944) but things are fine since the change to the locking system.\n@rxxg Thanks for clarifying! Makes more sense now. Btw, I suppose you don't have WSL enabled either, right? That would explain why `fsutil` doesn't work for you. That won't explain the original issue though, so we are still researching...\r\n\r\n \nThe issue might be caused by us using GetFileInformationByHandle, which could return incomplete data https://stackoverflow.com/questions/3523271/get-windows-hardlink-count-without-getfileinformationbyhandle. Looks like FindFirstFileNameW and FindNextFileNameW are the alternatives. And ansible is actually using it as well https://github.com/ansible/ansible/blob/105f60cf480572fb5547794cda1f9a05559ae636/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.LinkUtil.psm1#L230 . \nfsutil does work as expected on my local drive. I don't know what WSL is I'm afraid.\nSo we need to make our `is_hardlink` https://github.com/iterative/dvc/blob/0.80.0/dvc/system.py#L235 use `FindFirstFileNameW` and `FindNextFileNameW` to count hardlinks instead of relying on `nNumberOfLinks`. And then give you the dev version to check if it works for you. 🙂 \n@rxxg Created a POC patch for it. Please run\r\n```\r\npip uninstall -y dvc\r\npip install git+https://github.com/efiop/dvc@3080\r\n```\r\nto install it and then run\r\n```\r\ndvc version\r\n```\r\nand share its output.\nBad news 😞 \r\n```\r\n(dvc-3080) PS Q:\\dvc-test> dvc -v version\r\nERROR: unexpected error - (50, 'FindFileNames', 'The request is not supported.')\r\n\r\n\r\nHaving any troubles? 
Hit us up at https://dvc.org/support, we are always happy to help!\r\n```\n@rxxg Could you show `dvc version -v` (in that particular order), please?\nOops, sorry.\r\n```\r\n(dvc-3080) PS Q:\dvc-test> dvc version -v\r\nERROR: unexpected error - (50, 'FindFileNames', 'The request is not supported.')\r\n------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"c:\\users\\rxg\\dvc-3080\\lib\\site-packages\\dvc\\main.py\", line 48, in main\r\n ret = cmd.run()\r\n File \"c:\\users\\rxg\\dvc-3080\\lib\\site-packages\\dvc\\command\\version.py\", line 46, in run\r\n \"Cache: {}\".format(self.get_linktype_support_info(repo))\r\n File \"c:\\users\\rxg\\dvc-3080\\lib\\site-packages\\dvc\\command\\version.py\", line 103, in get_linktype_support_info\r\n link(src, dst)\r\n File \"c:\\users\\rxg\\dvc-3080\\lib\\site-packages\\dvc\\system.py\", line 48, in hardlink\r\n assert System.is_hardlink(link_name)\r\n File \"c:\\users\\rxg\\dvc-3080\\lib\\site-packages\\dvc\\system.py\", line 250, in is_hardlink\r\n return System._count_hardlinks(path) > 1\r\n File \"c:\\users\\rxg\\dvc-3080\\lib\\site-packages\\dvc\\system.py\", line 241, in _count_hardlinks\r\n return len(FindFileNames(path))\r\npywintypes.error: (50, 'FindFileNames', 'The request is not supported.')\r\n------------------------------------------------------------\r\n\r\n\r\nHaving any troubles? Hit us up at https://dvc.org/support, we are always happy to help!\r\n```\nThanks @rxxg ! Interesting. Btw, are you aware of how the network share is set up? I'm not really a windows guy, and google didn't help much 🙁 Is there a central server that you are connected to? If that is so, my only explanation right now is that it is running something old which doesn't support `FindFirstFileName`.\r\n\r\nLooks like we ran out of options here: the fs is returning incomplete data, and alternative ways of counting links are not supported. Another option that might work for you is enabling symlink support on your machine and using `dvc config cache.type symlink`.\nI don't have many details about the network server, sorry. Windows tells me that there is a cluster running NTFS + DFS, but I don't know what's on the other side.\r\n\r\nMy biggest concern at this point is that DVC is detecting that hardlinks are available (which they are, kind of) and trying to use them, but then failing to detect that the links have been correctly created. So if there are no other options for checking links, DVC should refuse to try and create them and fall back to the next cache type?\r\n\r\nI will try symlinks next.\r\n\r\n[EDIT]\r\nSo under Windows, [symlinks require special workstation configuration](https://blogs.windows.com/windowsdeveloper/2016/12/02/symlinks-windows-10/) which means it's a non-starter for me unfortunately.\n> My biggest concern at this point is that DVC is detecting that hardlinks are available (which they are, kind of) and trying to use them, but then failing to detect that the links have been correctly created. So if there are no other options for checking links, DVC should refuse to try and create them and fall back to the next cache type?\r\n\r\nYes, will update my PR to do precisely that. Currently using simple asserts in it, but it should actually raise a proper exception instead. Thanks for the reminder! 🙂 \r\n\r\n> So under Windows, symlinks require special workstation configuration which means it's a non-starter for me unfortunately.\r\n\r\nHave you tried installing our exe?
Or do you have very limited rights on your machine?
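For reference, the essence of the POC above (a sketch assuming pywin32, whose `win32file.FindFileNames` wraps `FindFirstFileNameW`/`FindNextFileNameW`; Windows only):

```python
from win32file import FindFileNames  # pywin32

def count_hardlinks(path: str) -> int:
    # Enumerate the file's link names instead of trusting nNumberOfLinks
    # from GetFileInformationByHandle, which some network shares misreport.
    return len(FindFileNames(path))

def is_hardlink(path: str) -> bool:
    return count_hardlinks(path) > 1
```

As the traceback above shows, some shares do not support this call either, so a robust implementation has to catch `pywintypes.error` and fall back (or refuse hardlinks entirely).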
":1,"Related: \r\nhttps://github.com/iterative/dvc/issues/5396":1,"Hi @ArcArcaman and thanks for reporting!\r\nI feel like this issue has already been discussed and if I'm not mistaken [there is already a fix](https://github.com/samuelcolvin/pydantic/pull/2170) 😉 \r\nMaybe you can have a look to check if it indeed solves your problem.\r\nHappy holidays ❄️ \nOh okay, thanks @PrettyWood! Yeah, it seems like it will solve my issue if the fix is merged. Sorry for the duplication, can't find it myself 😄 Happy holidays to you too ⛄ ":1,"Thanks for reporting. Amazing this has never come up before.\r\n\r\nShould be relatively simple to fix in [`pydantic/json.py`](https://github.com/samuelcolvin/pydantic/blob/master/pydantic/json.py), the only tricky thing is dealing with conditional imports.\n@samuelcolvin hey I'm trying to fix this bug and I'm encountering the tricky conditional imports thing. Would it be easier to move `NameEmail` to `pydantic/types.py` like `SecretStr`?\nHumm, I would prefer not to.\r\n\r\nWhat is the problem? network.py shouldn't import json.py":1,"Hi @jameysharp \r\nYep you're right but the workaround is easy (just not documented). Just like `dataclass` or `TypedDict`, _pydantic_ will attach a `BaseModel` to the main class under `__pydantic_model__`.\r\nSo you should be able to make it work by running\r\n`Tup.__pydantic_model__.update_forward_refs()`\r\n(I'm traveling so can't check on my phone for sure)\nOh, that's interesting. It doesn't work, but now I know things I didn't before. :grin:\r\n\r\nIf I call `Tup.__pydantic_model__.update_forward_refs()` immediately after declaring the `NamedTuple`, it fails with `AttributeError: type object 'Tup' has no attribute '__pydantic_model__'`, which seems reasonable.\r\n\r\nIf I call it after declaring the Pydantic model, then I get a different error (and a similar result in the real application I stripped this down from): `NameError: name 'PositiveInt' is not defined`\r\n\r\nSo it doesn't seem to be resolving the names using the correct global scope. My current hypothesis is that the same thing is happening at model definition time, but the error is hidden then because it's indistinguishable from a forward reference, even though this isn't actually a forward reference.\r\n\r\nBy contrast, declaring the `NamedTuple` field's type as `\"int\"` without future annotations, or as `int` with future annotations, works fine even without an `update_forward_refs` call, I assume because that name is bound in any scope.\r\n\r\nSo I think there are multiple bugs here:\r\n\r\n1. If people are expected to use `Foo.__pydantic_model__.update_forward_refs()`, then the `ConfigError` should say that instead of `Foo.update_forward_refs()`, and the documentation should cover that case.\r\n\r\n2. Maybe the implicit `__pydantic_model__` needs to be tied to the same module as the class which it wraps, somehow?\nYes it is added of course by pydantic once used in a BaseModel. A plain named tuple does not have this attribute so you need to call it after declaring your model.\r\nAnd for the context you're right locals are not the same.\r\nSo in your case it should work with something like\r\n`Tup.__pydantic_model__.update_forward_refs(PositiveInt=PositiveInt)`\r\nWe can (and should?) forward locals and stuff to resolve probably forward refs but it probably will never be perfect. So some doc and a better error message are probably a good idea as well
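Putting that workaround together as a self-contained file (a sketch, assuming pydantic v1.8+, where a NamedTuple used in a model grows a `__pydantic_model__`):

```python
from __future__ import annotations  # turns every annotation into a forward ref

from typing import NamedTuple
from pydantic import BaseModel, PositiveInt

class Tup(NamedTuple):
    x: PositiveInt

class Model(BaseModel):
    t: Tup

# Resolve the stringified annotation explicitly, passing the name that the
# synthesized model cannot find in its own module scope.
Tup.__pydantic_model__.update_forward_refs(PositiveInt=PositiveInt)

print(Model(t=(3,)))  # should now validate: t=Tup(x=3)
```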
But `PositiveInt` isn't a local here, it's imported in the module at global scope. The [Postponed Annotations](https://pydantic-docs.helpmanual.io/usage/postponed_annotations/) section of the documentation says that should work. That's why I think the synthesized `__pydantic_model__` is referencing the wrong `module.__dict__`.\nYep I know, I meant \"we can forward more than just the module\" - sorry if I was not clear.\r\nAs I told you I'm on my phone, but if you want to change your local pydantic you can try to change https://github.com/samuelcolvin/pydantic/blob/master/pydantic/validators.py#L561 and set `__module__` like it's done for dataclass https://github.com/samuelcolvin/pydantic/blob/master/pydantic/dataclasses.py#L186\r\nIf you want to open a fix you're more than welcome 😉\r\nIf not I'll have a look and open a fix end of next week! Thanks for reporting!":1,"I managed to catch the behaviour of a checkpoint experiment being collapsed to a single record without a name (and then returning to all checkpoints):\r\n\r\nhttps://user-images.githubusercontent.com/37993418/166836504-6e839a0f-6d8f-4226-835c-6f0203b47412.mov\r\n\r\n\nFor the problem in `Running checkpoint experiments from the queue`\r\n\r\nhttps://github.com/iterative/dvc/pull/7684#issuecomment-1116867478\r\n\r\nUnexpected behaviour:\r\n\r\n> 1. Only a couple of checkpoints are shown for each experiment until they are both finished and all are then copied to the workspace.\r\n> 2. The running experiment record is shown in a different record to the rest of the experiment.\r\n> 3. The running experiment record returns to queued.\r\n\r\n\r\nProblems `2` and `3` had already been solved in #8369 \r\n\r\nFor the problems in `Running checkpoint experiment in the workspace`\r\n> 1. exp show returns a single dict for a set of checkpoints during an experiment. This happens intermittently and breaks our \"live experiment\" tracking.\r\n> 2. exp show shows running experiments as not running mid-run.\r\n\r\n\r\nProblem `2` had also already been solved in #8369, and for problem 1, I tried and it still exists. \r\n\r\n\r\nSo we can focus on problem 1 in both sections. BTW, the epochs finished so quickly in the demo (less than 1s), which means the live tracking cannot be shown step by step.\nFor the problem:\r\n\r\n1. Only a couple of checkpoints are shown for each experiment until they are both finished and all are then copied to the workspace.\r\n\r\nI did some investigation on this problem yesterday.
The problem comes from the refs downloaded from\r\n\r\nhttps://github.com/iterative/scmrepo/blob/d175b923c76494c9023ee3581b349191ea2c8a6f/src/scmrepo/git/backend/dulwich/__init__.py#L658-L668 \r\n\r\n```python\r\n try:\r\n fetch_result = client.fetch(\r\n path,\r\n self.repo,\r\n progress=DulwichProgressReporter(progress)\r\n if progress\r\n else None,\r\n determine_wants=determine_wants,\r\n )\r\n except NotGitRepository as exc:\r\n raise SCMError(f\"Git failed to fetch ref from '{url}'\") from exc\r\n\r\n```\r\n\r\nwill cause \r\n\r\nhttps://github.com/iterative/scmrepo/blob/d175b923c76494c9023ee3581b349191ea2c8a6f/src/scmrepo/git/backend/dulwich/__init__.py#L671-L681\r\n\r\n```python\r\n for (lh, rh, _) in fetch_refs:\r\n refname = os.fsdecode(rh)\r\n if rh in self.repo.refs:\r\n if self.repo.refs[rh] == fetch_result.refs[lh]:\r\n result[refname] = SyncStatus.UP_TO_DATE\r\n continue\r\n try:\r\n check_diverged(\r\n self.repo, self.repo.refs[rh], fetch_result.refs[lh]\r\n )\r\n```\r\n\r\nto raise `DivergedBranches`, and the problem behind this error is that the sha value `fetch_result.refs[lh]` does not exist in the repo and will cause a `KeyError`. This causes any update (fetch) to fail; only after the training process is finished can the fetching succeed. This might come from the new commits arriving too fast in the temp workspace, because if we add a time gap into the training process, for example, adding `time.sleep(5)` in each training epoch, the fetching can succeed and the training can be shown in live progress.\r\n\r\n\"image\"\r\n\r\nSo if we want to solve this problem completely we might need to go deep into `dulwich`'s fetching process and make sure the revision returned is also properly downloaded and exists. And if I use force update here, \r\n\r\nhttps://github.com/iterative/dvc/blob/43a8eab2e053b072b93d7b399ce0678cb00f138e/dvc/repo/experiments/queue/utils.py#L41-L42\r\n\r\nit will raise an `Invalid commit` exception.\nMight be solved after #8477 if we use `pygit` as the new backend for fetch.\nI did some research today, and found that the problem when I force the update comes from us using different backends: `scm.resolve_commit` is the `pygit` backend, and it cannot properly resolve the force-updated commit fetched by `dulwich`. \r\n\r\nConfirmed that the problem comes from Dulwich, because I saw that the ref fetched by Dulwich is invalid.\r\n\r\n```bash\r\ngit commit \r\nAuto packing the repository in background for optimum performance.\r\nSee \"git help gc\" for manual housekeeping.\r\nerror: refs/exps/2a/1c3a54209af04dda29df9fc9309d8e93fcaf45/exp-710c6 does not point to a valid object!\r\n```":1,"This is not currently possible. It won't be added immediately, but I'd definitely consider adding something like this in future.\r\n\r\nThe way I had previously thought it would work, would be to add a model as an argument to `validate_arguments`, so it would be something like:\r\n\r\n```py\r\nfrom datetime import datetime\r\nfrom pydantic import BaseModel, validate_arguments, Field\r\n\r\nclass FooModel(BaseModel):\r\n dt: datetime = Field(default_factory=datetime.now)\r\n\r\n@validate_arguments(model=FooModel)\r\ndef foo(dt: datetime):\r\n print(dt)\r\n```\r\nThen `validate_arguments` is just checking that `FooModel` matches the arguments of `foo()`.\r\n\r\nYour `Field` approach would be slightly shorter, but might upset mypy. I'd be open to considering both.\nOK, thanks.
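For the record, the narrower case that started this thread - a `Field` default inside `validate_arguments` - looks like this once the fix mentioned below landed (a sketch, assuming pydantic v1.8+):

```python
from datetime import datetime
from pydantic import Field, validate_arguments

@validate_arguments
def log_event(name: str, dt: datetime = Field(default_factory=datetime.now)):
    print(name, dt)

log_event("started")                    # dt is filled in by the factory
log_event("backfill", dt="2021-01-01")  # string coerced to a datetime
```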
I will keep this issue open if necessary.\nI didn't even realise that Field wasn't supported in `validate_arguments` (didn't read the docs properly), so I didn't go looking for an issue until I tried to use it for a default argument. I've submitted a PR that fixes this particular case and doesn't break any of the other tests, but I've probably missed something so I'd welcome someone else's opinion on this.\r\n\r\nAlso, mypy seems happy with this, it doesn't complain about the default value, and if you pass a value of a different type it will catch it\nFor reference, the way I am using this at the moment:\r\n\r\n```python\r\nclass Line(BaseModel):\r\n key: Any = Field(..., description=\"An identifier for what to move\")\r\n start: float = Field(..., description=\"Centre point of the first point of the line\")\r\n stop: float = Field(..., description=\"Centre point of the last point of the line\")\r\n num: int = Field(..., ge=1, description=\"Number of points to produce\")\r\n\r\n @classmethod\r\n @validate_arguments\r\n def bounded(\r\n cls,\r\n key=key,\r\n lower: float = Field(..., description=\"Lower bound of the first point of the line\"),\r\n upper: float = Field(..., description=\"Upper bound of the last point of the line\"),\r\n num: int = num,\r\n ):\r\n half_step = (upper - lower) / num / 2\r\n return cls(key=key, start=lower+half_step, stop=upper-half_step, num=num)\r\n\r\n ...\r\n```\r\n\r\nI then use the Field info to make a JSON schema of the classes, and create a GraphQL query of both class constructors and alternative classmethod constructors\r\n\r\nThe re-use of the descriptions from Field definitions is a useful pattern":1,"Thanks @chenrui333 for the report. This is broken due to https://github.com/iterative/dvc/pull/9760. Will fix shortly.":1,"See #4484.\r\n\r\n@PrettyWood, any ideas?\nI'll check":1,"Regarding the tests, as initial support, it would be totally fine to have some manual confirmation that the author of the PR has ran it and it works for him. But the next step could be to adjust our existing hdfs testing infrastructure (it spins docker container https://github.com/iterative/dvc/blob/master/tests/docker-compose.yml) to use webhdfs port too and create a `webhdfs` fixture in https://github.com/iterative/dvc/blob/master/tests/remotes/hdfs.py , that we could then add to the list in https://github.com/iterative/dvc/blob/c16d3fa43d144dc800aea7d7aa80f772bab4312b/tests/func/test_data_cloud.py#L24\nSome info about performance:\r\n\r\n```\r\nFor light load applications, WebHDFS and native protobuf RPCs provide comparable data throughput, but native connectivity is generally considered to be more scalable and suitable for production use.\r\n```\r\n\r\nfrom https://wesmckinney.com/blog/python-hdfs-interfaces/ . Though clearly it will depend on particular library, there might be some bad ones. But still, this is an experiment :slightly_smiling_face: ":1,"The solution is to allow naked `Dict` and treat it like `dict`.\r\n\r\nThis was referenced at https://github.com/samuelcolvin/pydantic/issues/545#issuecomment-495943391\r\n\r\nPR welcome to fix this.\nThe solution is to allow naked `Dict` and treat it like `dict`.\r\n\r\nThis was referenced at https://github.com/samuelcolvin/pydantic/issues/545#issuecomment-495943391\r\n\r\nPR welcome to fix this.":1,"Hello @umesh-timalsina \r\nThis is a known \"issue\", which may or may not be changed in v2. It reminds me of #265, which could help you. I'm currently not with my computer sorry\nHello @PrettyWood. Thanks for the response. 
I will check the issue out.":1,"For reference, the intended behavior is:\r\n\r\n> dvc exp run should refuse to assign names with white space in them,\r\n\r\nExperiment names must be valid Git ref/branch names and cannot contain whitespace (https://git-scm.com/docs/git-check-ref-format)":1,"Hi @bobertlo !\r\n\r\nGood idea! We've had similar discussions in https://github.com/iterative/dvc/issues/1576 . Would you say that you would expect to see this option enabled by default? Or just as an explicit flag? \r\n\r\n\nRe #1576 definitely not committing. I think an explicit flag would be good. Maybe a config option would be more appropriate for enabling this mode by default. Not something you would want to flip on in a minor release at least.\n@bobertlo Config option sounds very good. Could totally start with that and then consider switching to that by-default in 2.0. :+1: \r\n\r\nThe implementation itself should be pretty simple, as we have a reminder message already with the list of edited files, so we could totally just use it to git-add something. https://github.com/iterative/dvc/blob/master/dvc/repo/scm_context.py#L6 . And the config option is easy to add in https://github.com/iterative/dvc/blob/master/dvc/config.py . Let us know if you'll have any questions :slightly_smiling_face: Check out our contrib guide https://dvc.org/doc/user-guide/contributing/core and feel free to ping us in #dev-talk channel on discord :slightly_smiling_face: Thank you for looking into this! :pray: \nThanks for the info! I'll start working on a draft of this feature as soon as I have time.":1,"Thanks @rubenpraets for the great research, this is an issue in `adlfs` where it should be fixed.\r\n\r\nSee https://github.com/fsspec/adlfs/blob/master/adlfs/spec.py#L285-L300 (it even refers to the issue you mentioned 🤷🏼).\r\n\r\n":1,"I suspect this is because internally `parse_obj_as` creates a model with `__root__` set to its first argument, so effectively you're doing\r\n\r\n```py\r\nfrom pydantic import BaseModel\r\n\r\nclass OperationData(BaseModel):\r\n id: str\r\n\r\nclass Operation(BaseModel):\r\n __root__: Tuple[int, OperationData]\r\n\r\nclass ParseAsObjectModel(BaseModel):\r\n __root__: Operation\r\n```\r\n\r\nwhich then fails, should be fixable via either or both:\r\n1. getting the above to work, even though it looks weird\r\n2. `parse_as_obj` detecting a model (perhaps a model with a custom root type) as it's first argument and using that model instead of creating another one.\r\n\r\nIt occurs to me that 1. might not be possible (or possible without complex or backwards incompatible changes) due to the way we handle custom root types.\r\n\r\nThe second option therefore might be easier.\nFor what it's worth, the following change seems to make the error go away:\r\nChange:\r\n\r\nhttps://github.com/samuelcolvin/pydantic/blob/e4cd9d2c87b0e6f42366b617de6441f8cd561085/pydantic/main.py#L549-L554\r\n\r\nto\r\n\r\n```python\r\n else:\r\n if cls.__custom_root_type__:\r\n value_as_dict = {\"__root__\": value}\r\n else:\r\n try:\r\n value_as_dict = dict(value)\r\n except (TypeError, ValueError) as e:\r\n raise DictError() from e\r\n return cls(**value_as_dict)\r\n```\r\n\r\nI don't necessarily think this is the optimal fix, but it might be useful as a starting point.":1,"Hi @charlesbaynham ! Could you please show `dvc version` output? \nFor the record: can reproduce this myself as well, by modifying md5 in a dvc file by-hand. 
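The loader behaviour at play is easy to demonstrate (PyYAML shown here; the digest below is made up):

```python
import yaml

# A digest consisting only of decimal digits resolves as an int under the
# default YAML schema, so it round-trips as a number instead of a string.
doc = "md5: 13318027741176273997969161861121"
print(yaml.safe_load(doc))  # {'md5': 13318027741176273997969161861121}

# Under YAML 1.2 core rules a value shaped like "<digits>e<digits>" can
# likewise resolve as a float, which is why the schema has to force
# checksum values to str.
```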
We've had a similar issue with pyyaml earlier, need to check this.\nOk, so both pyyaml and ruamel are affected, so we need to tweak our SCHEMA instead. Working on a fix...":1,"Hi @ofek \r\n\r\npydantic allows you to use arbitrary classes for validation with the setting `arbitrary_types_allowed` (cf. https://pydantic-docs.helpmanual.io/usage/model_config/): \r\n\r\n```py\r\nfrom __future__ import annotations\r\n\r\nfrom immutables import Map\r\nfrom pydantic import BaseModel\r\n\r\nclass Model(BaseModel):\r\n class Config:\r\n arbitrary_types_allowed = True\r\n\r\n test: Map\r\n\r\nprint(type(Model(test=Map(key=42)).test))\r\n# > <class 'immutables._map.Map'>\r\n```\r\n\r\nI hope this helps!\nHi @ofek \r\nThe problem with @rhuille's solution is that validation won't be done. If you still need it, see https://github.com/samuelcolvin/pydantic/issues/2311#issuecomment-771794594\r\n\r\n-------\r\n@samuelcolvin We already support `Sequence` quite well by having a dedicated check and trying to return the original type\r\nhttps://github.com/samuelcolvin/pydantic/blob/bd9c5723c676395363689549268738153e45a7e5/pydantic/fields.py#L649-L666\r\nMaybe we could do something similar? I guess something quite easy like https://github.com/PrettyWood/pydantic/pull/70/files could already handle most mapping types (works with OrderedDict, DefaultDict, Map, FrozenDict, ...)\r\nWDYT?\nHello @rshin,\r\nYou are right, it comes from the default mapping validator, which always returns a `dict`.\r\n@samuelcolvin A potential fix could be something like [this](https://github.com/samuelcolvin/pydantic/compare/master...PrettyWood:fix/dict_validation?expand=1), maybe with a try...except.\r\nAre you willing to consider it, in which case I'll open a PR?\nSurely better to implement a validator for `DefaultDict`, thus avoiding any performance impact on normal dictionaries?":1,"@DavidGOrtega The issue here is `--no-exec`. It only creates the dvc-file but doesn't act on it, hence why `models` is not added to `.gitignore` until you actually run it. Could you explain why you need it/why it matters?\n@efiop This issue is actually a bit misleading, since the issue is not only the inclusion in gitignore but also that the output is not tracked by DVC until you run repro. It matters especially in the CI/CD space. \r\nThe workflow is this:\r\n - setup a dvc pipeline without running repro locally\r\n - push to repo\r\n - CI pulls and runs repro\r\n\r\nWhen CI does dvc pull, errors appear (having to use --force), missing caches, or models not tracked.\r\n```\r\nERROR: failed to pull data from the cloud - Checkout failed for following targets:\r\n models\r\n Did you forget to fetch?\r\n```\r\nI'm actually reviewing this. I will come back with more info to update this.\n@DavidGOrtega thank you for reporting this! Were you able to find any workaround?\r\n\r\nIt seems like a valid use case which is not fully supported by DVC.\r\n\r\n@efiop is there any specific reason not to create outputs with `--no-exec`?\r\nIf we need to support this scenario, what would be your suggestion: create outputs or introduce a new option?\n@dmpetrov We could totally do that automatically, I'm just trying to understand the reasons behind it and if we really need it.
So far the use case is valid, so I think we could proceed with implementing it.":1,"Related: https://github.com/samuelcolvin/pydantic/issues/1091\nApparently, when calling\r\nhttps://github.com/samuelcolvin/pydantic/blob/master/pydantic/main.py#L221\r\nand then\r\nhttps://github.com/samuelcolvin/pydantic/blob/master/pydantic/fields.py#L325 \r\n\r\n`info_from_config`, fetched from a config, is not actually a part of the field yet (`self.field_info`)\r\n\r\nand thus on \r\n\r\nhttps://github.com/samuelcolvin/pydantic/blob/master/pydantic/env_settings.py#L79 \r\n\r\nwe get a default info\nThanks for pointing this out.\r\n\r\nPR welcome to fix it, I've no idea how hard it would be to solve, maybe quite difficult.":1,"Thank you @iesahin, this is great!\r\n\r\n> * [x] `--no-run-cache`: seems to have no effect ❌\r\n\r\n> * [x] `--no-commit`: seems to have no effect ❌\r\n\r\nIt makes sense to me that these wouldn't work since experiments need to be able to cache data to retrieve it. I'm fine with removing these unless anyone else objects.\r\n\r\n\r\n> `--glob`: ❌\r\n\r\nThis probably still needs to be fixed in #6457. We may need to bump the priority up unless anyone has a reason not to, in which case we should remove the option for now.\r\n\r\n> * [x] `--interactive`: ✅\r\n\r\nHm, does this make sense for experiments? How does it work with checkpoints or experiments outside of the working directory?\n> It makes sense to me that these wouldn't work since experiments need to be able to cache data to retrieve it. I'm fine with removing these unless anyone else objects.\r\n\r\nI think we should remove these even if someone objects. 😄 We can ask for PRs from them. \r\n\r\n\r\n\r\n> Hm, does this make sense for experiments? How does it work with checkpoints or experiments outside of the working directory?\r\n\r\nInteractive reproduction may lead to some unforeseen results about the experiments and checkpoints. It's identical with the basic `dvc exp run` if you say `y` to all stages, but what happens if you skip some of these stages, I don't know. (e.g., do we generate `exp-id` from the whole pipeline or only the stages that run?)\r\n\r\nI believe, in general, experiments should not be granularized by stages, we already have checkpoints for this. Interaction between these two features may lead to some inconsistent behavior. \n> I think we should remove these even if someone objects. 😄 We can ask for PRs from them.\r\n\r\n😄 I meant someone from @iterative/dvc \r\n\r\n\r\n\r\n> Interactive reproduction may lead to some unforeseen results about the experiments and checkpoints. It's identical with the basic `dvc exp run` if you say `y` to all stages, but what happens if you skip some of these stages, I don't know. (e.g., do we generate `exp-id` from the whole pipeline or only the stages that run?)\r\n> \r\n> I believe, in general, experiments should not be granularized by stages, we already have checkpoints for this. Interaction between these two features may lead to some inconsistent behavior.\r\n\r\nAgreed. I think we should drop this option.\nI believe if some option from `dvc repro` seems not relevant clearly, we can better drop it from `dvc exp run`. If we decide on a clear behavior later, we can revive them. :)\r\n\r\nIf _stage selection_ is not particularly clear, we can drop `--glob` as well. It's about stage selection. \n> If _stage selection_ is not particularly clear, we can drop `--glob` as well. 
It's about stage selection.\r\n\r\nI'm not sure how often `--glob` will be needed, but `exp run` takes a stage name as a target, so stage selection is included. Most of these options are about stage selection. Looking at #4912, I can see this being just as useful in `exp run` as in `repro`.\nI think, for the time being, it's better to remove these options from the help text and the documentation. If we get some feedback about the lack of these, we can create new tickets and move forward.\nHey everyone! I'm having an issue with `dvc repro --glob` where the pattern I'm passing is being ignored. \r\n (i.e. all target stages match instead of just those specified by my simple pattern).\r\n\r\nIs it possible that `--glob` is currently being ignored by `dvc repro` :question: \r\nNot sure if it's related, but the `glob_targets` method only appears on the `pull`, `push` and `add` commands (in [this search](https://github.com/iterative/dvc/search?q=glob_targets)) which, along with this issue, led me to think that `--glob` might not be supported by `dvc repro` yet.\r\n\r\nSince this issue looked _active_ and similar to my problem, I thought it'd be easier to ask here instead of opening a possibly duplicate issue. Apologies if I should have proceeded differently.\r\n\r\nThank you so much!\r\n\r\nPD: I'm running version `2.7.2` (I skimmed changes up to the latest in the releases page but didn't see anything related to `glob` nor `repro`, so I didn't bother upgrading)\n@erovira `dvc repro --glob` works as expected for me. Could you open a separate issue if you are seeing an issue with it?\nHey @dberenbaum , sure! I'll see if I can come up with a minimal reproducible example and create an issue. Thanks for answering!\r\n\r\n**EDIT:** No need to open a new issue. I found out what my mistake was.\r\nSuppose I have three stages with the following DAG: `A` -> `prefix-B` -> `prefix-C`.\r\nI was trying to do a repro of B and C just by doing: `dvc repro --glob prefix-*`, but I was seeing `A` being run and thought `--glob` wasn't working. As I now (think I) understand, A is run because of the default recursive search for changed dependencies.\r\n\r\nCombining `--single-item` and `--glob` like so `dvc repro --single-item --glob prefix-*` did the trick for me.\r\n\r\nThanks again!\n> --no-commit: seems to have no effect ❌\r\n\r\nI unchecked this since the option is still shown and accepted (2.8.1).\r\n\r\nDidn't double check any other ones.\nThose checkmarks show that I've checked them by running the command. :) @jorgeorpinel \nOK. A bit confused by the check on one side and the x on the other.\r\n\r\n![image](https://user-images.githubusercontent.com/1477535/146501297-2e9c89c1-fa03-4cc6-bb9d-eab46d3eabee.png)\nAll of these commands have been tested here or in https://github.com/iterative/dvc.org/issues/2861. We need to remove `--no-run-cache`, `--no-commit`, and `--glob` (unless someone prefers to address #6457).\r\n\r\ncc @pmrowla @karajan1001 ":1,"@BaruchYoussin, you probably meant @efiop :)\nI am using the name he uses at discord where I had a conversation with him.":1,"Thanks for tagging me. Yep. I think it makes sense.\r\n\r\nIf we can keep supporting it for a while with a deprecation warning, we should be fine.\r\n\r\nAnd thinking about it, it actually makes more sense for it to be `Field`.\r\n\r\n---\r\n\r\nSlightly related, currently `Schema` (soon to be `Field`) is a class. 
When declaring a Pydantic field with:\r\n\r\n```Python\r\na: int = Schema(...)\r\n```\r\n\r\n`mypy` complains that `a` should be of type `int` but is being of type `Schema`.\r\n\r\nOne way I'm \"hacking\" it in FastAPI (for stuff that inherits from `Schema`) is to create a function with a return type annotation of `Any` (while it is returning a `Schema` instance).\r\n\r\nThen because the return of the function is annotated as `Any`, `mypy` accepts it.\r\n\r\nI think this is something that could be interesting to have here too, maybe worth considering during this change.\nIn favour of this change too.\r\n\r\nThis seems to align better with dataclass terminology too, https://docs.python.org/3/library/dataclasses.html#dataclasses.field. \r\n\r\nFor consideration: is there a possibility that the similarity would be a bad thing, rather than good?\nI agree with @jasonkuhrt that using a `field` function would be nice. Being a function that actually returns a class would fix `mypy` errors when using `Schema` (or the next, `Field`), as a function can be typed to return `Any`. In fact, I'm doing something like that in FastAPI.\r\n\r\nAlso, by implementing a `field` function right away we could update the docs and show the new interface to new users, even while `Schema` is not yet renamed to `Field` (and the current `Field` to something else).\r\n\r\nI wanna take a shot at this.\r\n\r\nI have some naming questions:\r\n\r\n* Is it OK a `field` function exposed as the main interface instead of the corresponding class?\r\n * Would you prefer another function name? Or a capitalized version (a function `Field`) that simulates the class name, even while it's a function?\r\n* What would be an acceptable name for the current `Field` class?\r\n\r\n---\r\n\r\nOn the other side, having just a function `field` that returns a `Schema` class would solve the visible naming issue for users, that could be enough, without needing to rename the underlying classes. The drawback is that the internal names would be more difficult for newcomer contributors to the internal code.\nI think the function should be called `Field`, it should have an alias called `Schema` that also returns a `Field` but also raises a warning.\r\n\r\nThe only question is what do we call the class? It would be confusing to also call it `Field`, how about `FieldInfo`?\r\n\r\nPlease take a shot. I want to work on v1 features once v0.32 is released.\nPerfect.\r\n\r\nSo, the *current* `Schema` class will be called `FieldInfo`?\r\n\r\nAnd how should the *current* `Field` class be called? `FieldAttr`, `FieldObject`, `FieldElement`, `Attr`, (something else). Or should we keep it just `Field`?\r\n\r\n---\r\n\r\nIn summary, the next parts will be:\r\n\r\n* A `Field` *function* that returns a `FieldInfo` *class* instance.\r\n* A `Schema` *function* that returns a `FieldInfo` *class* instance and logs a warning.\r\n* A `FieldInfo` *class* that will replace the current `Schema` *class*.\r\n* A (what?) 
*class* that will replace the current `Field` *class* (or do we just keep this `Field` *class*)?\nLooks good, I think current field becomes `Attribute`, we should also rename that file.\nCorrection, `Attribute` is a dumb name since then you would have `model.__fields__: Dict[str, Attribute]` which is confusing.\r\n\r\nBetter to rename what used to be called `Field` as `ModelField`?\n+1 for `ModelField`, `FieldInfo`, and `Field` function that returns a `FieldInfo`\nThis may be controversial, but the library **attrs** supported typing (before python 3.6) using a type parameter. `Field` could have two positional arguments (type and default) then the `Field` function could actually return the correct type (using a Generic).\r\n\r\n```python\r\nclass Foo(BaseModel):\r\n maximum = Field(int, 100)\r\n\r\n# this would require a different code path\r\nclass Bar(BaseModel):\r\n maximum: int\r\n\r\n# this would be bad but possible\r\nclass Foo(BaseModel):\r\n maximum: int = Field(str, \"100\")\r\n```\r\n\r\nWould having the type as an arg to the `Field` method provide any additional benefits?\nI see a variety of approaches to the possible signature of the `Field` function; I've included my breakdown below. (I use `FieldType` to refer to the type of the object actually returned by `Field`.)\r\n\r\nI'm most in favor of one of the first two approaches, and would be fine with the first one (the current proposal).\r\n\r\n* Proposed implementation: `def Field(default: Any, **etc) -> Any`\r\n * (All pros/cons below are relative to this implementation)\r\n* The following overloaded approach (which may require some minor tweaks)\r\n ```python\r\n @overload\r\n def Field(default: T, **etc) -> T: ...\r\n @overload\r\n def Field(default: None, **etc) -> Any: ...\r\n @overload\r\n def Field(default: Ellipsis, **etc) -> Any: ...\r\n def Field(default: Any, **etc) -> Any: ...\r\n ```\r\n * Pro: type-checks default value against the annotated type where possible\r\n * Con: Incorrect type hint if you actually want to treat the result as a `FieldType`\r\n * Conclusion: currently my favorite alternative to the proposed implementation; depends on whether we might ever want to work with the returned value of the `Field` function as a `FieldType`.\r\n* `def Field(type_: Type[T], default: T, **etc) -> T`\r\n * Pro: type-checks the default value against the specified type\r\n * Con: Incorrect type hint if you actually want to treat the result as a `FieldType`\r\n * Con: Requires you to repeat the type if you want to use an annotation. 
**If you *don't* use an annotation, then the fields may occur out of order, potentially leading to confusing validation errors.**\r\n * Conclusion: Given the second con, I would probably prefer the prior approach.\r\n* `def Field(type_: Type[T], default: T, **etc) -> Any`\r\n * Pro: type-checks the default value against the specified type\r\n * Con: Even more so than the previous example, encourages you to drop the annotation to prevent them from getting out of sync (since there is no check they are equal)\r\n * Conclusion: given the field-ordering issue described above, I would probably prefer the overload-heavy implementation (despite getting an incorrect type hint for `FieldType`)\r\n* Approaches where `FieldType` is generic and `Field` returns a `FieldType[T]` (this is actually what `attrs` does, though the library mechanics are sufficiently different to cause different issues)\r\n * E.g.:\r\n * `def Field(default: T, **etc) -> FieldType[T]` \r\n * `def Field(default: T, type_: Type[T], **etc) -> FieldType[T]`\r\n * Pro: provides the type checker with the most accurate information\r\n * Con: will cause mypy errors when used to annotate fields in the absence of a mypy plugin; *with* a mypy plugin, this is probably unnecessary anyway.\r\n * Conclusion: Probably not even an improvement over the current implementation\r\n\r\n@skewty do those points change your thinking at all?\n@dmontagu I hadn't mentally considered \"Con: Incorrect type hint if you actually want to treat the result as a FieldType\". Thank you.\r\n\r\nGiven the above, I would choose method 1.\r\n\r\n\n@skewty for what it’s worth, I’m not sure it’s actually that big of a downside for the following reasons:\r\n\r\n1. You can always use the `FieldType` class directly\r\n1. Internally, there is little reason to use the `Field` function anyway\r\n1. Externally, there is little reason to need to interact with the result of `Field` as a `FieldType` anyway\r\n1. Type hinting as Any prevents any type safety benefits anyway, so if `Field` is going to be rarely/never used to produce something interacted with as a `FieldType`, it might make sense to just use `# type: ignore` in the few places you use for that purpose anyway.\r\n\r\n":1,"Locally it is still beneficial for us to use the .dir files for performance reasons. The dvc-data object DB isn't aware of the DVC repo and pipeline files within the repo, and it's faster to do directory tree lookups via the content-addressed storage version of the .dir file than it is to collect outs from all of the possible pipeline files in the local repo.\n> it's faster to do directory tree lookups via the content-addressed storage version of the .dir file than it is to collect outs from all of the possible pipeline files in the local repo.\r\n\r\nMaybe I'm naively assuming all operations are file-based and missing how the DB is used, but even if DVC needs the `.dir` entry in the database and in the `.dvc` file above, does DVC actually need to save a copy of the `.dir` file in `.dvc/cache`? Even if the DB were deleted, it seems like the `.dvc/cache/...dir` file is only needed when called from a `.dvc` file that contains the same info already and could rebuild its contents. The only meaningful differences I see between the `.dvc` file and the `.dir` file is YAML vs. 
JSON parsing speed (which I think is a valid and important concern), but the `.dvc` file needs to be parsed anyway.\nHaving a `.dir` entry in the metafiles breaks the scenario where cloud versioning could be used to simplify the workflow that includes updating the directory. Namely, ideally we would not need a special merge driver, etc.\r\n\r\nIt's described here - https://github.com/iterative/iterative.ai/issues/690. And unfortunately, we won't be able to use it until it's solved.\r\n\r\nIf we use it only locally, we should definitely find a way to move it outside the user-facing metadata.\nIs it possible to just delete the .dir entries from the .dvc files when encountering merge conflicts?\nFrom the support channel:\r\n\r\n> We work with documents (PDF, PNG, JPEG) and recently switched from GitLFS to DVC for security and efficiency reasons. However, we are currently experiencing some issues with its use because our annotation team, which previously only worked with Git, now has to deal with DVC as well. Conflicts can be frequent and are difficult to resolve for them. Additionally, we are having challenges with pull request reviews, as before it was directly done through our user interface (Bitbucket), and it was easy to see the differences on JSON/text files.\r\n\r\n\n@shcheklein Do you have a link for the support request? I haven't seen that one.\r\n\r\nI don't mean to push back on the feature, which is planned for the current sprint, but I want to get some clarification in case something's not working as expected with our merge driver after it was updated in https://github.com/iterative/dvc/pull/8360.\r\n\r\n> Additionally, we are having challenges with pull request reviews, as before it was directly done through our user interface (Bitbucket), and it was easy to see the differences on JSON/text files.\r\n\r\nShould we add this comment to #770?\nIt was an email to support@iterative.ai (let me know if you don't see them). \r\n\r\n> Should we add this comment to https://github.com/iterative/dvc/issues/770?\r\n\r\nYes, also wonder if @iterative/cml could help here with a report that does diffs?\n@shcheklein This may be hard to accomplish solely for cloud versioning since it is unknown during `dvc add` that this will later be pushed to a cloud-versioned remote. Some options discussed with @daavoo (TBH I don't really like them):\r\n\r\n* Encourage `dvc add --to-remote` to push at the same time as adding, and skip the .dir file.\r\n* Delete the .dir file from .dvc during `dvc push` to a cloud-versioned remote.\r\n* Same as the options above, except the .dir file gets hidden somewhere that's not tracked instead of being deleted.\r\n\r\nThe other option is to drop/hide .dir entries for all .dvc files, not only cloud-versioned ones. It might make sense, but I think we are probably not ready to do that today because we need to use cloud versioning to test out how well it works and scales. Without researching too much, I would also guess it would take much more effort.\n> that this will later be pushed to a cloud-versioned remote.\r\n\r\nQ: do we need to support mixed modes? 
can we make them exclusive (I know this was probably discussed before, I wonder if that is a scenario from a user perspective that we want to maintain?)\r\n\r\nI'm not sure I follow how we would make it exclusive. When you do `dvc add`, would we ask if you plan to push to a cloud-versioned remote? Maybe it's related to the `dvc add --to-remote` suggestion, where we are not only adding locally but also doing some operation on the remote simultaneously?\r\n\r\n> > Delete the .dir file from .dvc during dvc push to a cloud-versioned remote.\r\n> \r\n> That can be better than nothing, we already do an update to the file, right?\r\n\r\nYes, so far it may be the best option we have if we need to avoid .dir entries.\r\n\r\nIt would require some changes in the .dvc file and how we read data locally. Right now, it looks like:\r\n\r\n```\r\nouts:\r\n- md5: 22e3f61e52c0ba45334d973244efc155.dir\r\n size: 64128504\r\n nfiles: 2800\r\n path: cats-dogs\r\n files:\r\n - size: 16880\r\n version_id: CxBqqlHnOmHSG09sYlP36Mytf.48xmaJ\r\n etag: ed779276108738fdb2179ccabf9680d9\r\n md5: ed779276108738fdb2179ccabf9680d9\r\n relpath: data/train/cats/cat.1.jpg\r\n - size: 34315\r\n version_id: 4QWq043ohGMxOk_pilnTkVPnJFJEV2lc\r\n etag: 10d2a131081a3095726c5721ed31c21f\r\n md5: 10d2a131081a3095726c5721ed31c21f\r\n relpath: data/train/cats/cat.10.jpg\r\n...\r\n```\r\n\r\nOther than YAML validation failing, it basically works if the format instead looks like:\r\n\r\n```\r\nouts:\r\n- size: 16880\r\n version_id: LgpYHQnSyJ9EF0sJV1KWv58p7wOK.sHL\r\n etag: ed779276108738fdb2179ccabf9680d9\r\n md5: ed779276108738fdb2179ccabf9680d9\r\n path: cats-dogs/data/train/cats/cat.1.jpg\r\n- size: 34315\r\n version_id: wBYroaUEZSYH088HetJD.0XfaDUjdhAF\r\n etag: 10d2a131081a3095726c5721ed31c21f\r\n md5: 10d2a131081a3095726c5721ed31c21f\r\n path: cats-dogs/data/train/cats/cat.10.jpg\r\n```\r\n\r\n(There need to be some unrelated changes to the cloud fields `etag` and `version_id` because of https://github.com/iterative/dvc/issues/8356, and the field ordering could probably be improved also)\r\n\r\nIf we make changes here, they should probably be blockers for release since they are likely to break the .dvc files.\r\n\r\nTheoretically, the same changes could be useful without cloud versioning, but maybe it's good to make cloud versioning a testing ground. It may also relate somehow to https://github.com/iterative/dvc/issues/4657#issuecomment-1372440937.\r\n\r\n> I think if .dir is hidden is fine, if it doesn't affect users.\r\n\r\nIf we go with the format above, we could probably hash the .dvc file and keep a reference to it in a temp dir or database to do the same optimizations we do today with `.dir` files.\n> I'm not sure I follow how we would make it exclusive.\r\n\r\nDVC when a specific mode is set in its config and when it's trying to read, push, pull to/from some incompatible format should fail fast w/o trying to do anything with files, etc. E.g. if a config set to operate in a cloud versioning format it won't be able to do `dvc push` to a regular remote at all.\r\n\nThanks @shcheklein. What do you think about trying to keep the formats compatible and dropping .dir entries on push for now as described above? Seems like a simpler starting point and prevents cloud versioning from diverging too much from the existing dvc workflow for now.\nIf it's simpler to implement and maintain then yes, it makes sense of course.\nNeed to catch up on this 😅 . 
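To make the proposed flattening concrete, here is a rough sketch of the rewrite, assuming PyYAML and the field names from the two `.dvc` examples above (the filename is hypothetical, and this is not DVC's actual implementation):

```python
import yaml  # PyYAML

def flatten_out(out):
    """Expand a dir-style out (top-level .dir md5 plus a `files` list)
    into one flat entry per file, as in the second example above."""
    files = out.pop("files", None)
    if not files:
        return [out]  # already a single-file out
    prefix = out["path"]
    return [
        {
            "size": f["size"],
            "version_id": f["version_id"],
            "etag": f["etag"],
            "md5": f["md5"],
            "path": f"{prefix}/{f['relpath']}",
        }
        for f in files
    ]

with open("cats-dogs.dvc") as fobj:  # hypothetical path
    meta = yaml.safe_load(fobj)

meta["outs"] = [entry for out in meta["outs"] for entry in flatten_out(out)]
print(yaml.safe_dump(meta, sort_keys=False))
```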
\r\n\r\nJust wanted to say that my current impression is that trying to:\r\n\r\n> prevents cloud versioning from diverging too much from the existing dvc workflow for now.\r\n\r\nIt is causing more problems than benefits (internally but also at the UX level, IMO). \nThis feature in particular is not really specific to cloud versioning except that cloud versioning started down the path of saving individual file entries into .dvc files, and I think that info isn't used for add/commit/etc. but instead only for remote transfer operations (cc @pmrowla). Another option here is to configure whether to use `.dir` files separate from cloud versioning with something like `dvc config dir false`.\n> This feature in particular is not really specific to cloud versioning except that cloud versioning started down the path of saving individual file entries into .dvc files, \r\n\r\nI didn't understand that the scope was to get rid of `.dir` files on all cases.\r\n\r\n> Another option here is to configure whether to use .dir files separate from cloud versioning with something like dvc config dir false.\r\n\r\nIf we want to make the scope for DVC in general, I think we should discuss that separately and reconsider prioritization as it would take a different effort and I am not even sure if we really want to drop `.dir` files for the regular workflow.\r\n\r\n> and I think that info isn't used for add/commit/etc. but instead only for remote transfer operations (cc @pmrowla). \r\n\r\nadd/commit/etc do behave differently if the `.dvc` is \"cloud-versioned\". \r\n":1,"I'm working on the issue":1,"This sounds like a bug, but it might need to wait for v2 to be fixed since it could be considered a breaking change. \nI have a fix for this, duplicating the datetime's method of handling timedeltas. The rest of the module doesn't seem refactored to reduce duplicated code. I assume this change is is okay / better by keeping it that way?\r\n\r\nThis is my work so far: https://github.com/samuelcolvin/pydantic/commit/1d59058a2ae6f4b0f2f119b8eddc1529c5789d86":1,"@gcoter, does the params.yaml exist? Can you give more verbose error message (and, your command that you tried)?\n@skshetry thanks for your quick answer! My command looks like this:\r\n```\r\ndvc run -d training.py -p config/params_1.json -p config/params_2.json:data.training,model -o trained_model/ -n training python training.py\r\n```\r\n\r\nI prefer to use JSON files so `params.yaml` does not exist in my project.\n@gcoter, is that a typo on `-p config/params_1.json` (i.e. there's no list of parameters eg: `-p config/params_1.json:loss`).\n@skshetry No, I thought that specifying `config/params_1.json` would select everything inside this file. Should I specify explicitly the keys I want to select?\n@gcoter, yes, you have to specify keys that you want.\n@skshetry Ok, indeed it works if I specify manually the keys. I thought that DVC would list all the keys by default. But I don't mind listing them myself. Thank you for your help!\n@gcoter, but, I can see it's usefulness. I'll log this as a feature request.\n@gcoter, another question, why do you choose to `-p` rather than tracking whole file? for granularity tracking of parameter values, right?\n@skshetry I am at the beginning of the project, so it is not very clear in my mind, but I would like to benefit from the `dvc params diff` command. Basically, if I use `-d`, I cannot diff the params. Well, I could use `git diff` but `dvc params diff` formats the diff in a nice way in my opinion. 
And since this file is actually used to store configuration settings, it makes sense to me to use `-p`.\n#### Simple workaround for json files\r\n\r\nDefine bash functions in `.bashrc`\r\n\r\n```\r\nfunction print_json_keys {\r\n json_file=$1\r\n if [ \"$2\" ]\r\n then\r\n sep=\"$2\"\r\n else\r\n sep=','\r\n fi\r\n jq -r '[paths | join(\".\")]' $1 | sed -n 's/\\ *\"\\(.*\\)\",*/\\1/p' | head -c -1 | tr '\\r\\n' $sep\r\n}\r\n\r\nfunction get_all_dvc_json_params {\r\n json_file=$1\r\n echo \"$json_file:$(print_json_keys $json_file)\"\r\n}\r\n```\r\n\r\nthen use, assuming you have params in `config.json`\r\n\r\n```\r\ndvc run -p $(get_all_dvc_json_params config.json)\r\n```\r\n\r\n@skshetry let me know if you think it makes sense to add this as default behavior for DVC when the parameters are not defined. I can to take add this to DVC if I receive some guidance.\nFor future reference, the file must be named `params.yaml`, not `params.yml` for DVC to see it.\n@lambdaofgod, sure, go ahead. We might just need to support it as `-p params.yaml` or `-p params.json` (without explicit `:`). \r\nBut, the main question is, what the behavior should be.\r\n\r\n1) Should it work like your script, i.e. fetch all params and track each of them individually?\r\n2) Or, should it just act like other output dependencies, in that the stage reproduces if you add/delete any params in the file?\r\n\r\nIf 1), then how should we specify them on `dvc.yaml`? Should that be limited to just this helper function?\nOption 2 makes sense to me because:\r\n\r\n1. If a new parameter is added, I would rather err on the side of being overly aggressive in detecting parameters changes rather than ignore it.\r\n2. It's clean and simple in `dvc.yaml` (list the file instead of breaking out each individual key).\r\n3. It seems consistent with how YAML section keys are handled now. I don't see much difference from the current handling of `-p foo` with a `params.yaml` like:\r\n\r\n```yaml\r\nfoo:\r\n bar: 1\r\n baz: 2\r\n```\r\n\nWe have two choices on syntax in `dvc.yaml`.\r\n\r\n#### 1. Using `null` to indicate track everything\r\n```yaml\r\nparams:\r\n - params.yaml:\r\n - config.yaml:\r\n```\r\n\r\n Pros:\r\n 1. This does not require us to create our own syntax/DSL, it is YAML.\r\n 2. (Better schema?)\r\n \r\n\r\n Cons:\r\n 1. This syntax requires filename to be explicitly specified, even for `params.yaml` file which we consider as being the default.\r\n 2. The semantic meaning of `null` to mean track everything is questionable. However, it depends on the narrative we want to set. Currently, the purpose of `params` is that it can track parameters in a granular way. If we do change that to suggest that DVC can track params as a whole, but it can also support things at a granular level, it does make sense as well.\r\n (This is closer to implementation).\r\n\r\n#### 2. Using `*`\r\n```yaml\r\nparams:\r\n - * # means track everything from params.yaml\r\n - config.yaml:* # means track everything from config.yaml\r\n```\r\n\r\nPros:\r\n1. Supports tracking `params.yaml` easily by just using `*`.\r\n\r\n Cons:\r\n1. Requires us to create our own DSL, which may be confused with being a regex.\r\n\r\n\n> We have two choices on syntax in `dvc.yaml`.\r\n\r\nWhy it's not possible to support the following?\r\n\r\n```yaml\r\nparams:\r\n - custom_params.yaml # full file\r\n - foo # dvc will search for this as usual\r\n```\r\n\nIt is ambiguous. 
The `foo` may be a params file, and `custom_params.yaml` may be just a hyperparameter `yaml` nested inside `custom_params`. \r\n\r\nNote that this is only a metadata, and the files don't have to necessarily be present in the workspace, eg: the file could have been deleted or be read from certain revisions, etc. so the fallback approaches are going to be ugly.\r\n\n> It is ambiguous. The `foo` may be a params file, and `custom_params.yaml` may be just a hyperparameter `yaml` nested inside `custom_params`. \r\n\r\nI find it more intuitive than the other syntax, this is what users usually try first and report as a bug when it fails.\r\n\r\nFor conflicts, we can document the behavior and add priority for resolving: `checking if {name} is in params.yaml > checking if {name} is a file and exists`. Although, tbh, I don't think it's common to use a parameters file without extension nor name a parameter with a extension.\r\n\r\n> Note that this is only a metadata, and the files don't have to necessarily be present in the workspace, eg: the file could have been deleted or be read from certain revisions, etc. so the fallback approaches are going to be ugly.\r\n\r\nNot sure if I understand why this is an issue only with this syntax\r\n\nIt's better to avoid ambiguity if possible. And, `diff` and `status` have stronger requirements.\r\nFor example, what will you show `dvc status` for `foo` if the `foo` file and `params.yaml` both don't exist at all? \r\n\r\nAlso `dvc` supports any arbitrary file (it just fallbacks to yaml), so `foo.bar` could be a params file or an xpath to that parameters.\r\n\r\n> I find it more intuitive than the other syntax, this is what users usually try first and report as a bug when it fails.\r\n\r\nI know it's intuitive, but I don't think we have a much better choice. Given that we consider this to be an advanced feature, it's okay to consider it as a tradeoff.\n> ```yaml\r\n> params:\r\n> - custom_params.yaml # full file\r\n> - foo # dvc will search for this as usual\r\n> ```\r\n\r\nI think we could come up with consistent logic to deal with ambiguities here, so I think we should focus on whether the intuitiveness/convenience outweighs the ambiguity.\r\n\r\nCan we assume any param that ends in `.yaml`, `.yml`, `.json`, `.toml`, or `.py` is a file path, and otherwise it's not? Is there a realistic scenario when this would be problematic?\r\n\r\n> It's better to avoid ambiguity if possible. And, `diff` and `status` have stronger requirements.\r\n> For example, what will you show `dvc status` for `foo` if the `foo` file and `params.yaml` both don't exist at all?\r\n\r\nDoesn't the use of `.` to specify nested keys already introduce ambiguity (how do we know whether this is a nested key or just a top-level key with `.` in it)? Is this different?\r\n\r\n> Also `dvc` supports any arbitrary file (it just fallbacks to yaml), so `foo.bar` could be a params file or an xpath to that parameters.\r\n\r\nAren't parameters limited to particular file types, and dvc uses the file extension to determine the type? It seems like the only added ambiguity is when dvc falls back to yaml, and I don't think it's unreasonable to expect `.yaml` or `.yml` there.\n> Can we assume any param that ends in .yaml, .yml, .json, .toml, or .py is a file path, and otherwise it's not? Is there a realistic scenario when this would be problematic?\r\n\r\nIt's a weird restriction to not be able to track keys named `yaml`/`yml`/json`/`toml`/`py`. 
These file extensions are likely to expand in the future.\r\n\r\n> Doesn't the use of . to specify nested keys already introduce ambiguity (how do we know whether this is a nested key or just a top-level key with . in it)? Is this different?\r\n\r\nWe don't support the keys with `.` at all. `.` is strictly a separator.\r\n\r\n> Aren't parameters limited to particular file types, and dvc uses the file extension to determine the type? It seems like the only added ambiguity is when dvc falls back to yaml, and I don't think it's unreasonable to expect .yaml or .yml there.\r\n\r\nyes, DVC uses file extensions to determine the type and falls back to yaml parser which supports `json` and `yaml` contents.\r\n\r\nOverloading the schema to track parameters in granular and in whole may be confusing. \r\n\r\n\n> ```yaml\r\n> params:\r\n> - params.yaml:\r\n> - config.yaml:\r\n> ```\r\n\r\nHow will this work in CLI? `dvc stage add -p config.yaml:`? Will that cause any issues?\r\n\r\n> It's a weird restriction to not be able to track keys named `yaml`/`yml`/json`/`toml`/`py`. These file extensions are likely to expand in the future.\r\n\r\nI think we can document that supported file extensions cannot be used as parameter keys.\r\n\r\n> yes, DVC uses file extensions to determine the type and falls back to yaml parser which supports `json` and `yaml` contents.\r\n\r\nSo if we rely on file extensions, the breaking changes would be:\r\n* Parameter keys like `yaml` or other file extensions are no longer supported.\r\n* YAML/JSON parameter files can't use nonstandard extensions.\r\n\r\n@skshetry Does that all sound right? If you think there's a good way to go with option 1 and have the CLI make sense without introducing breaking changes, I'm fine with that. Otherwise, I think the file extension approach is intuitive and the breaking changes aren't too bad as long as we document them.\n@daavoo mentioned that tracking params directories could come up in the future. It seems like option 1 would be better if we want to make future changes like this easier.":1,"My brief research seems to point into `dvc/repo/metrics/show.py::_to_fs_paths` as a culprit.":1,"Update: after adding missing `z: float = field(init=False)` in the `Base` class the z attribute is properly preserved (does not get lost between the `__post_init__` and instance completion). 
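For reference, a minimal runnable sketch of the fix described in that update, using stdlib dataclasses (the original report may involve pydantic's dataclass wrapper, and the value derived in `__post_init__` here is hypothetical):

```python
from dataclasses import dataclass, field

@dataclass
class Base:
    x: float = 0.0
    # init=False keeps z out of the generated __init__ while still
    # registering it as a dataclass field, so the value assigned in
    # __post_init__ is preserved on the finished instance.
    z: float = field(init=False)

    def __post_init__(self) -> None:
        self.z = 2 * self.x  # hypothetical derived value

@dataclass
class Child(Base):
    y: float = 0.0

c = Child(x=1.0, y=2.0)
print(c.z)  # 2.0 -- z survives instance completion
```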
But the first issue - `__post_init__` not being called in the first place still stands.":1,"@helger Could you also show us your `$ dvc version` output, please?\n> @helger Could you also show us your `$ dvc version` output, please?\r\n\r\n```\r\n dvc version\r\nDVC version: 0.90.2\r\nPython version: 3.7.4\r\nPlatform: Linux-4.4.0-1104-aws-x86_64-with-debian-stretch-sid\r\nBinary: False\r\nPackage: pip\r\nSupported remotes: azure, gs, hdfs, http, https, s3, ssh, oss\r\nFilesystem type (workspace): ('ext4', '/dev/xvda1')\r\n```\n> > @helger Could you also show us your `$ dvc version` output, please?\r\n> \r\n> ```\r\n> dvc version\r\n> DVC version: 0.90.2\r\n> Python version: 3.7.4\r\n> Platform: Linux-4.4.0-1104-aws-x86_64-with-debian-stretch-sid\r\n> Binary: False\r\n> Package: pip\r\n> Supported remotes: azure, gs, hdfs, http, https, s3, ssh, oss\r\n> Filesystem type (workspace): ('ext4', '/dev/xvda1')\r\n> ```\r\n\r\n\nOutput of `pip freeze`\r\n\r\n[pip_freeze.txt](https://github.com/iterative/dvc/files/4370733/pip_freeze.txt)\nDiscord context https://discordapp.com/channels/485586884165107732/485596304961962003/691688363291312139\nThis happens because we try to create parent when trying to link from cache to workspace (and, here, parent of `s3:///` is `\"\"`, i.e. empty).\r\nhttps://github.com/iterative/dvc/blob/ee26afe65e8f9ebf11d7236fd3ee16a47f9b4fc6/dvc/remote/base.py#L380-L385\r\n\r\nIf the file is in `s3:////`, this won't fail.\r\n\r\nI don't think, we need to create parent directories for object-based storages except when checking out directory.\r\n\r\nGoogle Cloud Storage should equally be affected here as it's the same logic.\n@skshetry indeed adding another level fixes the issue\n@skshetry Great catch! :pray: In general we do need to create those, because s3 and s3 tools sometimes create dirs like that, it was discussed in https://github.com/iterative/dvc/pull/2683 . We don't need-need to do that, but it is a compromise to handle both approaches. But we definitely need to handle this corner case better.\n@efiop, it depends on how you see it. We can handle this corner case, but, there's no need for us to create a directory in most of the cases (except of #2678 cases?).\r\n\r\ni.e. `dvc add s3://dvc-bucket/data/file` should not even try to create `s3://dvc-bucket/data/` as a directory.":1,"It seems so good to me.\r\n\r\n> We should change this in v1 to `__root__` to better match other nomenclature, eg. custom root types, where we use `__root__`.\r\n\r\nI know about custom root type. But, someone might not understand the `__root__` clearly.\nagreed, documentation will need improving, but better to use one term everywhere.\r\n\r\nI'm not wedded to the term \"root\" other offers considered...\nI agree.\r\n\r\nMy answer is `__root__` for the title of the issue. \r\n\n> We should also implement at some point entire model validation\r\n\r\nFor the sake of discussion, here's a somewhat radical proposal:\r\n1. If the list of fields passed to the validator is empty, it becomes a `__root__`/entire-model validator (depending on whether the model has a non-default `__root__`). (I see no reason to drop support for `@validator('__root__')`, but wouldn't be opposed to it for simplicity's sake.)\r\n2. The `loc` field of `ErrorWrapper` is changed from typically containing `{field.name}` to typically containing`{Model.Config.title or Model.__name__}.{field.name}`.\r\n * I recognize that right now, validation happens inside the `Field`, which is not aware of the containing model. 
I think this could be addressed by the existence of a function/method that returns a modified `ErrorWrapper` where `loc` includes the name of the model.\r\n3. For `__root__` or entire-model validation, drop the `.{field.name}` from `loc`.\r\n\r\nTechnical implementation challenges aside, I think this might be a good way to achieve a unified interface for specifying whole-model *and* `__root__` validators, and for achieving consistent error labeling.\r\n\r\nI'll open a separate issue to discuss the implications of adding the model name to `ErrorWrapper`, but, any thoughts on this in the context of handling root errors?\nSo you're basically suggesting prefixing `loc` with the model name?\r\n\r\nThe problem is that very often (in few types of case, but the large majority of cases) we're simply doing something like:\r\n\r\n```py\r\nclass LoginModel:\r\n email: EmailStr\r\n password: constr(max_length=100)\r\n\r\ndef login_view(request):\r\n m = LoginModel(**request.json())\r\n ...\r\n```\r\nThe user (or frontend developer) has never heard of `LoginModel`, so an error at `loc: ['LoginModel', 'email']` is a lot more confusing than at `loc: ['email']`.\r\n\r\nWe could just use the model name instead of `__root__` for those errors?\nI know that in theory `loc` is designed to be processed by the middleware of using package and used to generate the final error format, but I for one quite often end up using it directly.\n@samuelcolvin I was coming from the context of FastAPI, and was imagining that the model name is externally available through the OpenAPI spec. But maybe that is a big assumption (especially if you want validation errors but don't want a public OpenAPI spec). I guess it would be nice if there were a config setting for `include_model_name_in_error_loc` (or something less verbose 😅). That would make it possible to optionally incorporate the change without many changes to code, and without needing to worry about causing conflicts with frameworks using pydantic (like FastAPI).\r\n\r\nEither way, I think `__root__` would be similarly confusing externally. But it would have the advantage of leaking less implementation information if you wanted to avoid that.\nI agree `__root__` isn't much better. But at least it won't get added to the majority of errors which have a field.\r\n\r\nWhat do others think?\r\n\r\nAlso what do other libraries do? Django (and presumably therefore DRF) uses `__all__` which is no clearer than `__root__`.\nI've thought this through some more, and given ValidationError tracebacks now include the model name, I think it makes sense to just use `__root__` everywhere (and prefer this to `__all__`). For the sake of debugging, I think it's fine to require access to the tracebacks to determine the model name. I'll take a shot at implementing.\n@samuelcolvin I tried taking a shot at this, but I think I got in over my head. Validation currently always happens in a field, and I couldn't find a way to add \"root\" validation for non-custom-root models without substantial and/or very-likely-controversial changes. 
Given the amount of changes that it seemed like I'd have to make, (not to mention the effort required to get it to actually *work* even if I *was* willing to make large changes), I'm going to leave this to you for now.\r\n\r\n(If you have an approach in mind that you think should work, I would also be happy to put in some renewed effort given more guidance.)\n@dmontagu do you have your unfinished code in a fork or branch somewhere where I can take a look at it?\n@skewty I'll look, but I'd probably advise against basing anything off of it, as I made a number of choices that backed me into a corner that I don't think had any hope of success. And I *suspect* any approach to this will have to get pretty deep into the internals, so it's probably better not to push your thinking down the same line that led me to problems.\r\n\r\nI was thinking about this recently, and thought it might actually be easier to implement this somehow via something closer to the dataclass `__post_init__` than to how validators currently work.\nI'm going to work on this soon, just haven't got a chance yet.":1,"+1\r\n\r\nI unprotected and changed a single file in a directory. dvc add for this directory makes unnecessary unprotection and copy of other files.":1,"Not able to reproduce. Not with `dvc diff` nor with `dvc metrics diff`, nor with current version nor with 0.87.0. Closing for now. @DavidGOrtega Please let us know if you are still experiencing this issue. And please provide verbose logs (by adding -v to the command) next time, so it is easier to see the issue in details. Thank you!\nHello, \r\n\r\nI have the same error on 1.0.0a2 when I do either `dvc diff`, `dvc metrics diff` or `dvc metrics show`. 
Here is the verbose log:\r\n\r\n```\r\n2020-05-20 15:16:59,145 ERROR: unexpected error - 'NoneType' object has no attribute 'endswith'\r\n------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/Users/corentinhembise/.local/share/virtualenvs/speech-XlnoNrSS/lib/python3.6/site-packages/dvc/main.py\", line 48, in main\r\n ret = cmd.run()\r\n File \"/Users/corentinhembise/.local/share/virtualenvs/speech-XlnoNrSS/lib/python3.6/site-packages/dvc/command/metrics.py\", line 45, in run\r\n recursive=self.args.recursive,\r\n File \"/Users/corentinhembise/.local/share/virtualenvs/speech-XlnoNrSS/lib/python3.6/site-packages/dvc/repo/metrics/__init__.py\", line 13, in show\r\n return show(self.repo, *args, **kwargs)\r\n File \"/Users/corentinhembise/.local/share/virtualenvs/speech-XlnoNrSS/lib/python3.6/site-packages/dvc/repo/__init__.py\", line 25, in wrapper\r\n ret = f(repo, *args, **kwargs)\r\n File \"/Users/corentinhembise/.local/share/virtualenvs/speech-XlnoNrSS/lib/python3.6/site-packages/dvc/repo/metrics/show.py\", line 99, in show\r\n vals = _read_metrics(repo, metrics, rev)\r\n File \"/Users/corentinhembise/.local/share/virtualenvs/speech-XlnoNrSS/lib/python3.6/site-packages/dvc/repo/metrics/show.py\", line 63, in _read_metrics\r\n with tree.open(metric, \"r\") as fobj:\r\n File \"/Users/corentinhembise/.local/share/virtualenvs/speech-XlnoNrSS/lib/python3.6/site-packages/dvc/repo/tree.py\", line 192, in open\r\n path, mode=mode, encoding=encoding, **kwargs\r\n File \"/Users/corentinhembise/.local/share/virtualenvs/speech-XlnoNrSS/lib/python3.6/site-packages/dvc/repo/tree.py\", line 49, in open\r\n if len(outs) != 1 or outs[0].is_dir_checksum:\r\n File \"/Users/corentinhembise/.local/share/virtualenvs/speech-XlnoNrSS/lib/python3.6/site-packages/dvc/output/base.py\", line 170, in is_dir_checksum\r\n return self.remote.is_dir_checksum(self.checksum)\r\n File \"/Users/corentinhembise/.local/share/virtualenvs/speech-XlnoNrSS/lib/python3.6/site-packages/dvc/remote/base.py\", line 322, in is_dir_checksum\r\n return checksum.endswith(cls.CHECKSUM_DIR_SUFFIX)\r\nAttributeError: 'NoneType' object has no attribute 'endswith'\r\n------------------------------------------------------------\r\n```\r\n\r\nPlease let me know how I can help on that.\r\n\r\nHere is my `dvc version`:\r\n```\r\nDVC version: 1.0.0a2\r\nPython version: 3.6.8\r\nPlatform: Darwin-19.3.0-x86_64-i386-64bit\r\nBinary: False\r\nPackage: pip\r\nSupported remotes: http, https, s3\r\nCache: reflink - supported, hardlink - supported, symlink - supported\r\nFilesystem type (cache directory): ('apfs', '/dev/disk1s2')\r\nRepo: dvc, git\r\nFilesystem type (workspace): ('apfs', '/dev/disk1s2')\r\n```
":1,"what's the purpose of doing `dvc metrics show` for a single path instead of using `cat`?\r\n\r\n`cat ml_model/level_{1,2}/scores.json` ?\nwhat about `dvc metrics show ml_model/` ?\r\n\r\nrelated: https://github.com/iterative/dvc/issues/2318\nTo run it with -a or -T for example?\nOr `metrics show -R dir` another example for a single target case\nIndeed, @shcheklein :+1: ":1,"Might be possible to achieve this now using validator decorators. Please let us know whether or not you get it to work.\r\n\r\nI'd be happy to accept a PR to implement it out of the box if it's something multiple people want.":1,"Off topic: Makes me think if we should re-consider auto `git add`ing files. 
:thinking: ":1,"Probably works because of some `Enum` class magic, but could be reworked to:\r\n```py\r\nif card_number.brand in (PaymentCardBrand.visa, PaymentCardBrand.mastercard)\r\n```\nI guess it depends if equality is acceptable compared to identity?\nI don't think magic works with is, and I'm kinda glad of that:\r\n```\r\n>>> from enum import Enum\r\n>>> class CardType(Enum):\r\n... Diners = 1\r\n... Visa = 2\r\n... MasterCard = 3\r\n...\r\n>>> CardType.Visa is (CardType.MasterCard or CardType.Visa)\r\nFalse\r\n```\nGood to confirm.\nAssuming this is resolved. \nSadly not resolved, code is still\r\n```python\r\nif card_number.brand is (PaymentCardBrand.visa or PaymentCardBrand.mastercard):\r\n```\r\n\r\nwhich won't work if `card_number.brand` is `PaymentCardBrand.mastercard`.\nYes, I was just confirming the logic is broken. @mike-hart you might want to submit a PR referencing this issue with the fix.\nThe use of `x is (y or z)` is just a mistake, it simplifies to `x is y` if y is truey or `x is z` otherwise. In this case since `PaymentCardBrand.visa` is truey we're effectively just doing `brand is PaymentCardBrand.visa`.\r\n\r\nI'll replace it with an `in` check.\r\n\r\nA bit more background on enums:\r\n* Identity checks work with the actual enum member but not with the enum value\r\n* equality checks work with both the enum member and the enum value, but **only if** the enum inherits from the correct type as well as `Enum`.\r\n\r\n```py\r\nclass PaymentCardBrand(str, Enum):\r\n amex = 'American Express'\r\n mastercard = 'Mastercard'\r\n visa = 'Visa'\r\n other = 'other'\r\n\r\nb = PaymentCardBrand.visa\r\nb is PaymentCardBrand.visa\r\n#> True\r\nb == PaymentCardBrand.visa\r\n#> True\r\n\r\nb = 'Visa'\r\nb is PaymentCardBrand.visa\r\n#> False\r\nb == PaymentCardBrand.visa\r\n#> True (This would not work with the current PaymentCardBrand which doesn't inherit from str)\r\n```\r\n\r\nI don't know if `card_number.brand` can be a string or only a `PaymentCardBrand`, but better to fix anyway.":1,"@pmrowla, great catch!\r\n\r\nWhat about metrics? Does `dvc metrics diff 123456 abcdef` where `abcdef` is a ref to an experiment?\r\n\r\n> One concern would be how to handle situations where someone wants to save an experiments plot\r\n\r\n@pmrowla could you please clarify? Are you talking about the output of the `dvc plot diff` command? If so then, yes, it is a temporary file.\n@dmpetrov `dvc metrics diff` will not work with experiment refs, but `dvc exp diff ...` can be used to compare regular commits with experiment refs.\r\n\r\nAnd yes, I was referring to the html output files generated by plot diff and show.\nI think that we should not allow persistence of experiment plots, if the change is worth committing, it should be committed to the repo. \nSo after starting an implementation of `dvc exp plots ...` subcommands, I've got a couple thoughts:\r\n\r\nRather than having a separate `dvc exp plots show ...` command, it may make more sense to just use regular `dvc plots show`. 
To show plots for a specific (regular) commit, you currently need to do:\r\n\r\n```\r\n$ git checkout \r\n$ dvc plots show ...\r\n```\r\n\r\nFor experiments, you can already get the same behavior, by doing:\r\n\r\n```\r\n$ dvc exp checkout \r\n$ dvc plots show\r\n``` \r\n\r\nSo I'm not sure it makes sense for us to have a dedicated `dvc exp plots show` command.\r\n\r\nFor `plots diff`, we do accept specific revisions, but rather than have a dedicated `dvc exp plots diff ...` subcommand, we could just add a `-e/--experiment` flag to the existing `dvc plots diff` to denote that experiment revisions will be included in the list of revisions to diff.\r\n\r\nLikewise, for #4455 I think all we would need is the new parallel coordinates template to do `dvc plots diff -t [-e] [revisions...]` (and could be used with both regular and experiment revisions)\r\n\r\nSo in summary there's 2 questions here:\r\n\r\n1. Would `dvc exp checkout` + `dvc plots show` work for us? (It seems to me that it would work)\r\n2. Would we prefer `dvc plots diff -e/--experiment ...` or a separate `dvc exp plots diff ...` (using `-e` rather than an entire new command seems cleaner to me)\r\n * This question can also be extended to metrics/params, do we want to keep the dedicated `dvc exp diff ...` command, or should we just have `-e` for `metrics diff` and `params diff`?\r\n\r\n@dmpetrov @pared \n@pmrowla I agree that separate command might be overkill.\r\n`-e` option for `diff` would match nicely with `run -e`. \r\nAnother option would be to provide a special constant that would mark the current workspace for `plots` and `metrics`. \r\nFor now, it is only possible to plot current, uncommitted workspace only against one other `revision` (eg `dvc plots diff HEAD`)\r\nMaybe we should just allow something like `dvc plots diff workspace HEAD`. That way we could handle experiments, and would extend functionality of plot.\r\n\r\nAh, now I see that `workspace` approach would prevent us from comparing experiments, which is not good. I guess `-e` is the way to go. ":1,"Surely as simple as implementing `__getstate__` and `__setstate__` on `ValidationError`?\r\n\r\nIf it is that simple, PR welcome to implement it.\nI don't believe so. I did spend a few hours trying to do just that and couldn't get it to work. Iirc, the problem with that approach is that `__setstate__` isn't called until after the object's `__init__` is called. So the error has already occurred and you don't get a chance to fix things.\r\n\r\nReading the pickle docs, it seemed like `__getnewargs_ex__` might be a way to get that to work but I failed at getting that to work too. I'm not sure, though, if I just didn't/don't understand getnewargs or if it turned out that exceptions are specialcased already and that specialcasing was interfering (in one of the python bug reports, ncoghlan noted that there were specialcases in exceptions which perhaps no longer worked with the new ways that pickle worked)\r\n\r\n\nMy python.org bug from earlier was a duplicate. I've closed it. This is the older bug: https://bugs.python.org/issue27015 There's a cpython PR which I confirmed would fix at least the mandatory keyword args. It is currently awaiting another review from a python core developer: https://github.com/python/cpython/pull/11580\nHi @abadger I just had a look at it and I think I have something working.\r\nI'm opening a PR! Please tell me if it works with your whole example":1,"Great point! 
Our internal API function dvc/project.py:Project.status() actually returns a dict, which then gets printed in dvc/command/status.py, so it should be pretty easy to implement.\nJust need to add `--json` option in the dvc/cli.py for `dvc status` and then process it in `dvc/command/status.py` and json.dump() it, instead of printing as usual.\nYeah, I kept the Project.status() in mind, I'm starting to dive into the code 🤓\r\n\r\nI would additionally include the `callback`, `locked` and `checksum_changed` boolean values explicitly so that the status can be explained. This would also be useful in the human readable output IMO.\r\n\r\nAnd another thing, I'm thinking it would be useful to have separate `outs_changed` and `outs_missing` since the consequences are a bit different and should probably be reflected in the status icon - I would go for red with changed outputs and dark grey with missing outputs. I'm guessing users can pull someone else's repository and work with most DVC files without the outputs and I don't want the icons to scream in red. But when there's a mismatch in the output file's checksum, we should take that as a warning so red color makes sense.\nAlso, showing a status icon means that I have to turn these stage status properties into a single distinct status icon. Since it would be too much to have an icon for all the combinations, the way I see it is to process the properties by severity to produce something like this:\r\n\r\nIf locked -> locked (yellow lock icon overlay?)\r\nElse if any outputs changed -> outs_changed (red)\r\nElse if any outputs missing -> outs_missing (grey)\r\nElse if md5 changed -> checksum_changed (blue)\r\nElse if any dependencies (`--with-deps`) changed or missing -> deps_changed (orange)\r\nElse -> ok (original DVC colored icon)\r\n\r\nWe could also independently show `--always-reproduce` using some overlay, e.g. a yellow dot in bottom left.\r\n\r\nMaybe that logic should actually be done internally and shown in an additional field like `\"status\": \"deps_changed\"`. There could even be a `--simple` option that would show just this field in human readable / machine readable format.\r\n\n@prihoda Sounds amazing! But how would we handle a bunch of changed things? E.g. deps and outs changed. Black icon?\nPersonally I wouldn't go for those combinations, I would just process it from most severe to least severe and show the first one using the if-else approach. When the file is open, we can show a detailed description. If you consider all combinations of outputs (changed, missing, ok), dependencies (changed, ok) and checksum (changed, ok), it's 3x2x2 = 12 combinations.\r\n\r\nBut I'm definitely open for discussion on this. We could enumerate those combinations that might make sense to be shown explicitly and decide on each one based on the different use-cases. In my view, for a file with invalid outputs, I already know that it should be reproduced or pulled so I don't care that dependencies were changed as well.\r\n\r\nWe could mark each property with a different marker - like circles in bottom left and right corners, but that might start to become quite busy. I will try to experiment in Photoshop, it will be much easier to decide visually.\nMoving the discussion here: https://github.com/iterative/intellij-dvc-support-poc/issues/1 \nClosing this in favor of https://github.com/iterative/intellij-dvc/issues/1 .\nOops. sorry. 
This is still very relevant, reopening.\nSome implementation details here: https://github.com/iterative/dvc/issues/3975#issuecomment-640815774\r\n\r\n\nHello! So, I think this is a very useful feature to implement, because it gives the possibility to use it for integrations in IDEs, which is my case. Since I'm developing a plugin that makes use of dvc, it would be very interesting to have the possibility to run the dvc status and create a file with the output as you do when creating dvc pipelines. I also think that you should use YAML (like you do in .dvc files) or JSON so that it can be easily parsed in different languages (for example, I am making my java plugin). ":1,"@dmpetrov I broke this up into 3 related issues mentioned above to make it more manageable to take action on these. Please add anything missed from those, and we can continue discussion there. 🙏 \nAfter speaking with @dmpetrov, one of the high-level issues is that the defaults for `dvc exp init` are asking the user for too much, too soon, and expecting/assuming too much from the user. Some ideas on how we can fix it:\r\n\r\n1. Make `dvc exp init -i` the default.\r\n2. Make `dvc exp init --explicit` the default.\r\n3. Make `dvc exp init -i` the default if no command argument is provided, otherwise make `dvc exp init --explicit` the default.\r\n\r\n@skshetry Let me know if you have thoughts.\r\n\r\nIn the interactive mode, this might also relate to earlier discussions about whether a blank entry should be skipped or use the default path.\nOther options:\r\n\r\n4. Create another `type` like `simple` or `blank` that creates a stage with only a command (no deps or outputs). This might also allow us to get rid of `--explicit`.\r\n5. Allow for `--data`, `--params`, etc. to be passed without arguments, in which case they will be added to the stage with default values. This would be useful if `--explicit` is the default or if there is blank stage `type`. If we do this, we probably need short flags for these to enable something like `dvc exp init -dp`.\r\n\r\nIMO making `--explicit` the default might make the command too close to `dvc stage add`, but the other ideas could all work.\n> IMO making `--explicit` the default might make the command too close to `dvc stage add`, but the other ideas could all work.\r\n\r\nAgree. There are some good options but this one seems like the best one.\n> Agree. There are some good options but this one seems like the best one.\r\n\r\n@dmpetrov We might be saying slightly different things 😄. However, if you want to start from this simple default, I think the best way to achieve this is option 4 above. We can have 3 template types: `default` (a stage with only a command), `ml` (the current default), and `dl`.\r\n\r\nWhy add another template type? One goal of `dvc exp init` was to introduce an easy way to setup typical project structures, such as deep learning with checkpoints and dvclive since it's confusing to configure manually. If we always start with a blank stage, there's no real difference between these template types (at least in non-interactive mode), and their purpose becomes confusing.\n@dberenbaum oh, right 🙂 `--explicit` is going to be the same as `stage add`. 
It is not a problem for it to be the same; the problem is that it does not serve the purpose of `exp init` - being a shortcut/cookiecutter for experiments.

There are a couple more questions:

- [ ] `--type=dl` has the same issue as the default one - `dvc exp init --type dl` is not functional until you create all the data/src/metrics files and dirs.

- [ ] Why are there no plots in `--type=dl` (we have plots in the regular one)?

- [ ] `--interactive` also does not create a proper structure for the default options. It is very easy to check the files and dirs for existence in interactive mode and ask whether the user would like to create them. This is what the user will see as the result:

```
$ dvc exp run
WARNING: Your workspace contains staged Git changes which will be unstaged before running this experiment.
ERROR: failed to reproduce 'dvc.yaml': [Errno 2] No such file or directory: '/Users/dmitry/src/test/exp_init_i/data'
```

My opinion (similar to the Conclusion in the initial issue description): `dvc exp init` (including `--interactive`) cannot successfully finish if the specified (or default) files and directory structure is not created. Otherwise, we are just moving `exp init` issues to the later `dvc exp run` stage, where they cannot be solved automatically. So we are basically moving the issues to the user's shoulders.

@dmpetrov It seems there are multiple use cases:

1. Taking a code script without dependencies or outputs and running it as an experiment. The blank template/`--explicit` default can solve it, or we can make some alias for `dvc stage add` to do this.
2. Adapting an existing project with parameters, metrics, data, etc. into a DVC experiment. Could you take a look at #7138 and provide your feedback? Creating empty files may still move the issues to the user's shoulders. Better messaging can give hints to users on what to do.

Another possible use case:

3. Adding dependencies and outputs gradually.

One possible way to do this is to allow `dvc exp init` to append new information to an existing stage. For example:

```
$ dvc exp init python train.py
Created train stage in dvc.yaml.
$ cat dvc.yaml
stages:
  train:
    cmd: python train.py
$ dvc exp init --data --models model.h5
Modified train stage in dvc.yaml.
$ cat dvc.yaml
stages:
  train:
    cmd: python train.py
    deps:
    - data
    outs:
    - model.h5
$ dvc exp init --data raw
ERROR: The specification conflicts with stage 'train' in 'dvc.yaml'. Use '--force' to overwrite.
$ dvc exp init -f --data raw
Modified train stage in dvc.yaml.
$ cat dvc.yaml
stages:
  train:
    cmd: python train.py
    deps:
    - raw
    outs:
    - model.h5
```

Note that this isn't possible in `stage add`, since every dep/out type may be passed multiple times in a stage, making it impossible to know when to append vs. overwrite.

I was expecting to have a cookiecutter based on a user's script. We need to generate a template with a predefined structure that users can use to adapt the project step by step. The predefined structure also serves an educational goal - the user learns the possibilities and how to adopt them.

I think we have to have a proper cookiecutter as one of the options of `dvc exp init`. It would be great to have it as the default one.

`--explicit` as the default might be an easy starting point, but we are losing the idea of the cookiecutter and the educational part of it.
As a result, we push people to read all the `exp` docs.

Adding dependencies and outputs gradually is a good idea but looks like overkill 🙂 As we discussed before, we should promote the idea of modifying dvc.yaml manually, not by commands.

In any of these scenarios, I don't think we should leave a user with a non-working `dvc exp run` (as much as we can). Missing files/dirs should lead to command failure (or error messages with a re-prompt in `--interactive` mode).

> In any of these scenarios, I don't think we should leave a user with a non-working `dvc exp run` (as much as we can). Missing files/dirs should lead to command failure (or error messages with a re-prompt in `--interactive` mode).

Right, I think everyone can agree with this. We can prioritize that for now while we continue to think through other ways to improve the experience.

We should still try to guide users on `exp run`, as these errors may still happen if `dvc.yaml` is modified by users. That does not mean that we cannot improve the `exp init` message.

Another problem that I see these days is that `exp init` has a concept of `data`/`models`/`source`, etc., which we don't have in `dvc.yaml` (where it is translated into dependencies/outputs instead), and this makes the subsequent `exp run` confusing.

If we make `--explicit` the default, we'll force users to iterate on their stage through `dvc.yaml` later. I am not sure if this is a good or a bad thing, but the two have different concepts that don't translate well (as mentioned above).

@skshetry good point about `--explicit`. A whole set of defaults might lead to a better default experience.

@dberenbaum and I discussed creating the workspace structure on defaults, and showing the workspace tree in non-interactive mode as we do for interactive mode. We also need to work on #4112 to support tracking everything inside the params file.

@dmpetrov mentioned in our discussion that `plots` is an advanced feature, so we may want to make it optional here in `exp init`.

Should we convert this to a discussion after #7331 rather than close it completely? The immediate action points have been addressed, but I don't think everything has been resolved.

---

Looking at #7331, it might be helpful to separate inputs and outputs in the UI to clarify which will be created. No action needed now, but to me it seems confusing why some paths are created and others aren't.

@tapadipti shared her experience with `dvc exp init` and mentioned that it's confusing that for `data` you have to do `dvc add`, but not for `models`.

Sure, let's turn it into a discussion after #7331; I'll remove the closing marker.

> @tapadipti shared her experience with dvc exp init and mentioned that it's confusing that for data, you have to do dvc add, but not for models.

Since we create the `data` directory now (after #7331), it makes sense to also track it by default. It's a bit unclear whether we should always do this, or only when we are creating those directories ourselves during `exp init`.

I just remembered that I have an open PR for tracking dependencies in #6914, but haven't moved forward with it because it requires us to basically re-implement a simpler `dvc add`.
Tracking dependencies without ensuring any graph correctness would be simple, but otherwise we'll end up re-implementing `dvc add` (duplicated-output checks, overlapping outputs, etc.).

We cannot reuse `dvc add` as it's more complex, has its own messy output and progress bar, and in `dvc exp init` we need to check correctness together with the pipeline stage.

> (duplicated-output checks, overlapping outputs, etc.)

Sorry, can you explain? I didn't get your point.

@dberenbaum, we need to check whether the data output conflicts with other existing .dvc files or stages and with the pipeline stage that we are going to generate. The checks that we do are:

1. That the graph is acyclic (is a DAG),
2. That the output is not duplicated,
3. That the output does not overlap with an already existing output,
4. That the .dvc file to be generated is not inside the output of a different stage, and
5. That the .dvc file does not exist already.

This is a bit complicated and involves collecting all `dvc.yaml` and `.dvc` files. #6914 already handles all of that, but it does so by duplicating what `dvc add` does. I'm just looking to solve this in a better way, or waiting to come up with a better solution.

We could, however, simplify the implementation by choosing not to verify, as verification also happens later on every operation.

Let's leave it for now, since I believe some of the planned data management changes could impact this, and it might make more sense to revisit later.

Maybe we can have a label for these summaries?

From @dberenbaum's comment on https://github.com/iterative/dvc/pull/6681#issuecomment-928180859:

> How do you anticipate this (dvclive scenario) working for non-interactive mode?
>
> I'm still wondering whether the template option is needed (solely as a cli option, not as an actual template; it could be renamed) to separate the dvclive scenario and provide flexibility to add other scenarios in the future. What do you think?

One way may be to make `--live` mandatory for the dvclive scenario unless `live` is set in the config (though there's a question about what to do if `metrics` and `plots` are also specified in the config). Maybe keeping `live` from config is also a way to go.

Another would be to create a `--template` or something similar flag (`--with={live, etc.}`)?

> Another would be to create a `--template` or something similar flag (`--with={live, etc.}`)?

Although it adds another option, I like this mainly because it's more explicit. Now that we don't have templates, it's less transparent what the command and various options are doing. Having a separate option for this suggests that it's not just another path to add but actually changes the stage configuration. We can also further explain it in the help text for that option.

On a similar note, at least interactive mode should probably have a confirmation so that users can see what will be added to `dvc.yaml` beforehand. There could be an option to do the same outside of interactive mode (`--dry`?). Not sure if this has been discussed before for `dvc stage add`.
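Returning to the correctness checks enumerated earlier in this thread, here is a rough sketch of checks 2 and 3 only (a hedged illustration assuming plain relative path strings; the real implementation would first collect outputs from all `dvc.yaml` and `.dvc` files and also handle the DAG and .dvc-file checks):

```python
import os

def check_output_conflicts(new_out: str, existing_outs: list) -> None:
    # Check 2: the output must not be duplicated.
    # Check 3: the output must not overlap an existing output (i.e. one
    # path must not be inside the other).
    for out in existing_outs:
        if out == new_out:
            raise ValueError(f"output '{new_out}' is already tracked")
        if os.path.commonpath([out, new_out]) in (out, new_out):
            raise ValueError(f"'{new_out}' overlaps with existing output '{out}'")

check_output_conflicts("model.h5", ["data", "metrics.json"])  # passes
# check_output_conflicts("data/raw", ["data"])  # would raise: overlap
```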
Tasks that remain:

- [ ] Improve UI/CLI, messaging, and prompts
- [x] Add config support
- [ ] Add tests/QA
- [ ] ~~https://github.com/iterative/dvc/issues/6168~~
- [ ] ~~https://github.com/iterative/dvc/issues/6605~~

Might be worth revisiting https://github.com/iterative/dvcyaml-schema/issues/7 to better support the workflow:

`dvc exp init` for simple cases / basic bootstrapping -> manually edit `dvc.yaml` for customization?

With dvcyaml-schema, my goal was to have the same schema for JSONSchema generation and validation. We have been using pydantic for JSONSchema generation and voluptuous for validation.

There are a few issues with using pydantic:

1. It does not have good support for validating dictionaries with varying keys. `stages` in dvc.yaml is a dictionary whose keys are stage names and whose values are stage information. Pydantic is not very good at representing this. Similarly, `deps`/`outs` are also lists of dictionaries.
2. It is almost twice as slow as voluptuous, because it uses type hints.
3. It coerces types (though you can make it stricter). That may be a good thing sometimes, but it makes it quite tricky to keep a voluptuous-based schema compatible with a pydantic-based schema.

In the short term, I think it's better to move the pydantic schema to dvc and support both of those schemas.

Voluptuous also has negatives: it's not very actively maintained, and it's a bit hard to extend with custom validations compared to pydantic (whose DX is great).

Also, `cattrs` is another option to look at, but I believe it'll have the same issues as pydantic. Migrating to any other library may take a long time, as it's quite hard to guarantee compatibility (we rarely test unhappy paths for dvc.yaml) and it needs QA. See the toy sketch after this comment for the varying-keys point.

We should probably include docs work also.

I noticed a couple of workflow issues when doing some QA of `dvc exp init`:

1. It's unclear when or what to `git add`. If I don't `git add` the `dvc.yaml` or the dependencies, `dvc exp init` works in the workspace but not in temp.
2. If I am adding a data dependency, I most likely want to `dvc add` it rather than `git add` it.

cc @pmrowla

New/untracked files aren't pulled into the `--temp/--queue` temporary workspace by default.

From https://dvc.org/doc/user-guide/experiment-management/running-experiments#the-experiments-queue:

> Note that Git-ignored files/dirs are explicitly excluded from queued/temp runs to avoid committing unwanted files into Git (e.g. once successful experiments are persisted).
>
> 💡 To include untracked files, stage them with git add first (before dvc exp run) and git reset them afterwards.

It sounds like we need:

- deps from `exp init` should get `.dvc` files (be `dvc add`ed)
- `exp run --temp` should just automatically stage any untracked `.dvc` or `dvc.yaml` files (it seems like this is probably the desired default behavior in all cases, not just `exp init`-specific ones?)
  - this doesn't address everything, and there may be other untracked files that users actually need to manually stage for their experiment, but we can't really automate behavior for non-dvc-related files

> deps from `exp init` should get `.dvc` files (be `dvc add`ed)

This is only true for the data dependency, since the code and params dependencies probably should be git-tracked. We might just start with a hint here. Doing a `dvc add` automatically might be unexpected and make `dvc exp init` much slower. I don't love the hint idea, but it's an easy starting point. 🤔
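As an aside to the schema discussion earlier in this thread, a toy sketch of the varying-keys point (simplified schemas under stated assumptions - not DVC's real ones): voluptuous can use a type as a dict-key validator directly, while pydantic v1 needs a wrapper model with a `Dict[str, ...]` field.

```python
from typing import Dict, List, Optional

import voluptuous as vol
from pydantic import BaseModel

# voluptuous: arbitrary stage names are just a `str` key validator
STAGE_SCHEMA = vol.Schema(
    {"stages": {str: {"cmd": str, vol.Optional("deps"): [str], vol.Optional("outs"): [str]}}}
)

# pydantic v1: the varying keys move into a Dict[str, Stage] field
class Stage(BaseModel):
    cmd: str
    deps: Optional[List[str]] = None
    outs: Optional[List[str]] = None

class DvcYaml(BaseModel):
    stages: Dict[str, Stage]

data = {"stages": {"train": {"cmd": "python train.py", "deps": ["data"]}}}
STAGE_SCHEMA(data)       # validates
DvcYaml.parse_obj(data)  # validates too, but via the wrapper model
```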
> `exp run --temp` should just automatically stage any untracked `.dvc` or `dvc.yaml` files (it seems like this is probably the desired default behavior in all cases, not just `exp init`-specific ones?)
>
> * this doesn't address everything, and there may be other untracked files that users actually need to manually stage for their experiment, but we can't really automate behavior for non-dvc-related files

👍 Can we also try to add (not force, but at least do a regular git add) any other dependencies of the experiment?

Discussed with @pmrowla, and he noted that iterating over stages and git-adding everything during `exp run` could be time-consuming when doing things like queuing an experiment. It seems easier to git add during `exp init`, since we already have all the stage info. Thoughts on adding a git add operation at the end of the command, @skshetry? This might make sense as a `dvc stage add` option along the lines of #5932.

---

Would it be possible to mark the `data` dependency as dvc-tracked in a lightweight way? For example, add `data` to `.gitignore` and create a `data.dvc` file with contents:

```
outs:
- path: data
```

This also seems like it might be useful generally as an option in `dvc add` (similar to https://git-scm.com/docs/git-add#Documentation/git-add.txt---intent-to-add but likely more useful given the size of the data dvc handles). Thoughts/suggestions?

Until now, we have considered a dependency to be either git-tracked or dvc-tracked, so we do not track dependencies by default. But it makes sense to do this for `data` in `exp init`.

Would it be confusing to create both `data.dvc` and `dvc.yaml` files?

Regarding the `git add` by default: we have the `core.autostage` config that does this automatically. I don't see any issues with doing this by default (it's only `dvc.yaml`/`.gitignore`, right?).

@dberenbaum, should we also `dvc init` by default on `dvc exp init`?

> Would it be confusing to create data.dvc and dvc.yaml files?

Ideally, we could specify in `dvc.yaml` that `data` is a DVC-tracked dependency, but I think it's probably easier to add `data.dvc` than to figure out new `dvc.yaml` syntax for now, unless you have an idea to do that easily. Adding `data.dvc` might be awkward, but it also introduces a basic concept to users. What do you think?

> I don't see any issues with doing this by default (it's only dvc.yaml/.gitignore, right?).

👍

> @dberenbaum, should we also `dvc init` by default on `dvc exp init`?

👍

@pmrowla, do we need to also stage the `params` file? Or does `experiments` do some magic for those?

EDIT: looks like we do need to stage that file ourselves.

Just wanted to upvote:

> Use train as a default name? (exp init: iterate on ui, tree and show to-be-generated yaml content #6750 (comment))

At least for the `--type live` option, having the name of the added stage (`live`) be the same as one of the output fields feels a little strange.

And regardless of whether it is a deep learning (`live`) experiment or not, the action performed in the stage would be the same (so `train` sounds good).

What about naming the types to match the use cases, like classical ml and deep learning? `ml` and `dl`?
This might be more user-friendly since it focuses less on the products/features involved; plus, `dvclive` might expand to non-deep-learning use cases, which would make things even more confusing.

I'm sorry, I don't quite understand why `live` is a `dl`-only thing?

Two reasons:

1. The stage includes checkpoints.
2. Dvclive was initially created for "live" monitoring of steps during training.

I would consider these to mostly be useful when doing deep learning, although maybe that's my personal bias. @karajan1001, do you see those being useful in other scenarios?

However, dvclive may become more of a general-purpose logger, in which case it may be useful in any ml scenario. That's one reason to consider renaming `dvc exp init --type live`.

My 2c:

`--type` may be renamed to `--feature` for a more flexible approach.

```
dvc exp init --feature live --feature some-future-feature
```

may play nicely together then.

@iesahin, `dvc exp init` is targeted more in terms of _scenarios_ rather than features, of which we have plenty in `dvc stage add`.

@skshetry Can we go with `dl` or `deep` for now, since that's the focus of the scenario? We don't even need to rename the default.

> `init` dvc repo if uninitialized
> Add data as a dvc-tracked directory

After discussion, these are being de-prioritized for now since they are not simple changes and are not strictly necessary.

> Can we go with dl or deep for now since that's the focus of the scenario? We don't even need to rename the default.

`deep` does not convey enough information, I think. Is `DL` a common abbreviation?

> Is `DL` a common abbreviation?

Yeah, it's not as widespread as ML, but it's fairly common. Google "deep learning abbreviation."

@dberenbaum, with `--dl`, I don't think `dl` as a default stage name makes sense. Maybe we should go with `train` then?

What's the status, @skshetry?

> Raise an issue if dvc is initialized with --no-scm or if the project is not git init-ed.
> Should we also dvc init by default on dvc exp init?

I think `exp init` could even do both `git/dvc init` if needed (https://github.com/iterative/dvc/issues/7109). But definitely at least the latter 👍

Also, I noticed `params.yaml` is the only required part of the default project structure (even if it's empty). Should `exp init` create an empty one (with some comment on top, perhaps) if it's not there? Otherwise it fails.

> Also, I noticed `params.yaml` is the only required part of the default project structure (even if it's empty). Should `exp init` create an empty one (with some comment on top, perhaps) if it's not there? Otherwise it fails.

@jorgeorpinel See my response in https://github.com/iterative/dvc.org/pull/3071#discussion_r766966189.

> The params section of dvc.yaml currently expects keys from a file

What about generating a dvc.yaml without `params` if there's no params.yaml file?

related: https://github.com/iterative/dvc/issues/7130

---

I'm looking into this, but I'm afraid the solution isn't trivial.

Weird that this got 11 :+1: and 2 :eyes: in 2 hours.
Any idea why this happened?

---

Also found out that `dvc run --help` explicitly declares support for dirs for both of these options:

```
 -m , --metrics
 Declare output metric file or directory.
 -M , --metrics-no-cache
 Declare output metric file or directory (do not put
 into DVC cache).
```

From the Discord discussion I assume that the `-m`/`-M` flags should not accept directories for now.

@nik123 That's correct! Thank you so much for looking into it! :pray:

---

You can resolve this by removing the extra `@dataclasses.dataclass`.

Might have a similar origin to #4907.

I applied both decorators to shorten the example. In my actual code I can't remove the extra `dataclass` because I have the following situation:

```
# can't change this definition
@dataclasses.dataclass
class ValueObj:
    name: str


# IO module
ValueObj = pydantic.dataclasses.dataclass(domain.ValueObj)
```

P.S. Your response time is impressive!

That was luck, often it's not so good. :-)

As mentioned on the linked issue, the current dataclass support is very involved; most of our bug fixes have been related to trying to patch dataclass edge cases.

I'd strongly prefer not to change dataclass support again in v1, but hopefully we can fix this properly in v2.

Unless we can find a trivial solution.

I can understand that you don't feel the urge to touch the dataclass support in v1.

For me it's already a win to know that this is really a bug and not just me misusing your API. If it can't be fixed upstream, I'll find a workaround. (Even though I'm not particularly fond of the idea, it should be easy to just duplicate the `ValueObj` definition.)

Starting Monday, I could free up some time to look into this in more detail. Would you accept a pull request if I managed to find a simple fix? (I don't expect to, but you never know... :))

Possibly, it's really up to @PrettyWood, who is the "master of dataclasses".

I dug deeper and found that the recursion occurs while deep-copying a `DataclassProxy`.
At some point Python's `copy._reconstruct` function creates a new uninitialized `DataclassProxy`, then checks if it has a `__setstate__` attribute.
`DataclassProxy.__getattr__` wants to delegate the attribute access to its `__dataclass__` field, but being uninitialized, this field does not exist, and looking it up invokes `DataclassProxy.__getattr__` again...

I think this problem could be solved by adding a `__deepcopy__` method to `DataclassProxy`:

```python
class DataclassProxy:

    # ...

    def __deepcopy__(self, memo: Any) -> "DataclassProxy":
        return DataclassProxy(deepcopy(self.__dataclass__, memo))
```

This seems to fix my example above (I did not check if it breaks anything else, though).

Yup @mbillingr ! Seems like the right fix 👍

---

+1

@m-gris You are using the hydra integration, right?
Does it not solve your use case, or do you just want to simplify by not needing hydra?

@dberenbaum
Yes, I do use the hydra integration indeed, but afaik it does not allow me to do what I'm trying to do, i.e. reduce hardcoding to the bare minimum by composing expanded variables as much as possible.

Here is a silly toy example that should suffice to illustrate what I have in mind.

Let's say that in `config.yaml` I have the following:

```yaml
hydra_test:
  dir: 'raw'
  file: 'items.csv'
  path: ${hydra_test.dir}/${hydra_test.file}
```

The `path` key will indeed be properly parsed & evaluated if using OmegaConf in a python script.

But if, in `dvc.yaml`, I try to do the following:

```yaml
stages:
  hydra_test:
    cmd: echo ${hydra_test.path}
```

It results in the following error:

`/bin/bash: line 1: ${hydra_test.dir}/${hydra_test.file}: bad substitution`

It would actually be really nice if dvc could interpret / evaluate those. :)

> The path key will indeed be properly parsed & evaluated if using OmegaConf in a python script.

This should also be working, and the params.yaml should contain the interpolated value.
I will take a look to find out why it is not working.

---

The error message should say to install `dvc[webdav]` and not `dvc[webdavs]`, but either way it should also be installed automatically with `[all]`.

Is this a DVC installation that you've upgraded from earlier versions via pip? webdav support was added (relatively) recently, so it's possible that if you upgraded via pip it was not installed.

Running

```
pip install --upgrade dvc
```

will only upgrade core DVC, and not any of the remote dependencies.

To upgrade DVC and its remote dependencies, you need to run

```
pip install --upgrade dvc[all]
```

(or substitute the specific remotes you wish to upgrade instead of `all`).

Yes, I upgraded from 1.10.1 via pip. But I had not installed any remote before, and I ran

```
pip3 install dvc[all]
```

which installed all remotes successfully.

Hence, from your answer, I assume that the trailing s in _webdavs_ was the only problem?

Yes, the issue was just the trailing s. Internally, the URL scheme for webdav over SSL/TLS is `webdavs`, but the extra to be installed is `[webdav]` (which covers both `webdav` and `webdavs` remote URL schemes). However, when we generate the error message for a missing remote dependency, we incorrectly substitute the URL scheme rather than the proper pip extras name.

This bug would also apply to `http` remotes: for HTTPS URLs the error message will suggest installing `dvc[https]` when it should be `dvc[http]`.

---

We have already had a test about this.

https://github.com/iterative/dvc/blob/68897aa0def4e509a27d75f2e78106495d08e0c4/tests/func/test_ls.py#L167-L173

and it was introduced from #3246 at the beginning of the `dvc list` command.

---

Note: let's make timestamps show at the beginning of the message, in a human-readable format.

---

Hi @tommilligan
The thing is, we can't do anything about it, because Python collapses `Union[A]` into `A` at interpretation time:

```py
from typing import Union
class A: ...
assert Union[A] is A
```

So at runtime, pydantic has no way to know `A` was supposed to be a `Union`.
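To illustrate the consequence - a hedged sketch assuming pydantic v1's discriminated unions - a single-member "union" collapses to the bare class before pydantic ever sees it, so it cannot carry a `discriminator`; adding a second member keeps the `Union` intact:

```python
from typing import Literal, Union

from pydantic import BaseModel, Field

class Cat(BaseModel):
    kind: Literal["cat"]

class Dog(BaseModel):
    kind: Literal["dog"]

class Pet(BaseModel):
    # Union[Cat] alone would already be plain `Cat` here, so pydantic
    # could never see a discriminated union; two members keep it alive.
    animal: Union[Cat, Dog] = Field(..., discriminator="kind")

Pet(animal={"kind": "dog"})  # dispatched to Dog via the discriminator
```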
I was about to start poking to see if the simplification was internal to `pydantic` or not, but if it's at a higher layer I'll give it up as a lost cause.\r\n\r\nWould you accept a PR noting this as an edge case in the documentation/error message?\r\n\r\nI suppose the workaround is to add a dummy second type into the union, or just to remove the discriminator until it is required.\r\n\r\nFor documentation forward compatibility, we were using [openapi-schema-pydantic](https://github.com/kuimono/openapi-schema-pydantic), which now collides with the `discriminator` Field property nicely. But that's not your problem! I'll file a bug over there now.\nDocumentation PRs are always welcome :)":1,"Awesome! Please also check the latest state of the changes in iterative/dvc.org/pull/967 so both sides are in sync when this is addressed 🙂 \nAlso, should the file name be `list.py`? its `ls.py` now.\nAnd not sure about the `target` name for the last param... Why not `path` like in `dvc get/import`?\r\nUPDATE: Investigating in https://github.com/iterative/dvc.org/pull/1021#pullrequestreview-368410823\nAnother question here: is there an easy way to tell which files in the list are tracked by DVC and which ones by Git? This is kind of important for finding paths that can be sent to `dvc import/get`.\n@jorgeorpinel At this moment only coloring is supported as a way of the differentiating types of the files.\r\nYou may read more in [the comment](https://github.com/iterative/dvc/issues/3431#issuecomment-593560766)\n@JIoJIaJIu I think that link is just somewhat relevant. We have two things to colorize in a different way:\r\n\r\n1. DVC tracked files - e.g. we need to use a different color for a directory we did `dvc add data\\` with. Or the same for `dvc add data\\data.xml`. And we should have a flag (--outs?) to filter DVC \"outputs\" only.\r\n2. DVC-files - we can use a color scheme for links, for example (since they serve to some extent as a link)\r\n3. Git-traked .dvcignored files.\r\n\r\nThat link it probably relevant to 3 only, but most likely @jorgeorpinel was asking about 1.\r\n\r\n\n> That link it probably relevant to 3 only\r\n\r\nIn the comment I provided info about **1** and **2** scenarios, but the main issue relates to **3**, right\r\nLet me summarize it here then\r\n\r\nWe color output as `out=color`\r\n```\r\nLS_COLORS=\"out=01;37\" dvc list ...\r\n```\r\nwill color outs only\nOK thanks for the info guys, that's great to know. But since colorizing is not enabled by default (I assume) nor supported in every system, would it make sense to add another way to tell them apart? It can be as simple as a 2nd column in the output that has either `git` or `dvc`, or maybe only `out` for DVC outputs (stuff that you can `dvc import/get`). After all this was one of the main motivations for `dvc list` in the first place.\n@jorgeorpinel what is the motivation for you to distinguish DVC-tracked and Git-tracked files by default? The whole point is to make the whole experience with get/import/list the same for all files as much as possible.\nTrue, I forgot you can get/import Git-tracked files now!":1,"Scm driver how has newline detection implemented by @dmpetrov https://github.com/dataversioncontrol/dvc/blob/master/dvc/scm.py#L89 . Closing.\nCan this issue be reopened? It seems that it's back in the last release (0.41.3)\nHi @nik123 !\r\n\r\nCould you provide more details?\nI have dvc 0.41.3 on Ubuntu 16.04 installed as a deb package. 
In my system I can easily reproduce the bug by executing the following commands in bash:

```
$ mkdir tmp
$ cd tmp
$ git init
$ dvc init
$ echo "Hello, world!" > data.txt
$ dvc add data.txt
```

The `.gitignore` file generated by the `dvc add data.txt` command doesn't have a newline (i.e. `\n`) character at the end of the `/data.txt` line. I can easily detect the absence of the newline character by executing:

```
$ git add .gitignore
$ git diff --cached .gitignore
```

The commands above give me the following output:

```
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..604fb1f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+/data.txt
\ No newline at end of file
```

@nik123 Thanks! Reopening.

---

I don't think there's currently a solution for this. The most relevant discussion for this is #659, though I wouldn't call this a duplicate, since that one is specific to `from_orm`.

The short answer is that pydantic doesn't (yet) have any solution for detecting and coping with circular references.

If you have a suggestion for a solution, please comment on #659 or submit a demo PR. I have no idea, off the top of my head, how hard it would be to use the same approach for `from_orm`, `parse_obj` and our case above - or if they're even particularly linked.

---

Found the root cause of this. This happens because of an optimization that we do if the file is empty (i.e. 0 bytes in size). We never create a hardlink for a file of 0 bytes, and therefore it fails when we try to verify that the hardlink was created.

https://github.com/iterative/dvc/blob/a9bc65ee1f0446de766db59ad1b149de064c5360/dvc/remote/local.py#L170-L182

I think the [`is_hardlink()`](https://github.com/iterative/dvc/blob/a9bc65ee1f0446de766db59ad1b149de064c5360/dvc/remote/local.py#L196) function should be aware of this optimization as well. I'll make a fix tomorrow.

P.S. This only failed if the file was empty (not even a newline). So:

```sh
echo > foo && dvc add foo # works because of the extra newline character added by `echo`
touch bar && dvc add bar # does not work, as the file is empty
```

Also, subsequent runs would not fail, because before this error is thrown the file gets cached (however, it deletes the file from the workspace), i.e.:

```sh
dvc init --no-scm
dvc config cache.type hardlink
touch foo && dvc add foo # fails and deletes foo from workspace
touch foo && dvc add foo # passes
```

@skshetry Great investigation! :pray:
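A hedged sketch of the awareness described above (illustrative only - the real logic lives in dvc/remote/local.py): treat 0-byte files as "already linked" instead of demanding an actual hardlink, mirroring the optimization that skips linking them in the first place.

```python
import os

def is_hardlink_aware_of_empty(path: str, cached: str) -> bool:
    # Assumption-based sketch: DVC never hardlinks 0-byte files, so an
    # empty workspace file should pass the check rather than fail it.
    if os.path.getsize(path) == 0:
        return True
    # The usual hardlink test: both paths point at the same inode.
    return os.stat(path).st_ino == os.stat(cached).st_ino
```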
---

I guess this code should be changed to require either a username or a password:

https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/networks.py#L191-L193

PR welcome to change this.

The workaround until that is fixed is to define your own `RedisDsn`:

```py
class RedisDsn(AnyUrl):
    allowed_schemes = {'redis'}
```

E.g. without `user_required=True`.

Why should this code be changed?
https://github.com/samuelcolvin/pydantic/blob/e3243d267b06bda2d7b2213a8bad70f82171033c/pydantic/networks.py#L191-L193
Can't we just remove `user_required=True` from RedisDsn?

Maybe, but it seems extremely rare that someone wants to log in to a redis instance without either a username or password.

Maybe we shouldn't change the code but simply update the docs to explain that you might want to use your own custom `RedisDsn`.

Given how easy it is to create your own, I'm concerned with catering to the 99% here. I think in 99% of cases a redis server will require either a username or password.

As far as I know, there is no username in Redis - only a password can be set, with the `requirepass` config parameter, so `user_required=True` makes no sense. But maybe I don't understand how RedisDsn should be used...

That is true; I too didn't see any way that redis has a username. It only has a password. If RedisDsn dropped `user_required=True` then we could use RedisDsn directly with aioredis:

```python
redis = await aioredis.create_redis_pool(
    'redis://:sEcRet@localhost/')
```

Okay, let's remove `user_required=True` from `RedisDsn`. PR welcome.

---

Thanks for reporting; agreed, it should apply to all fields.

Could you try on master or v1.0b2 and confirm if it's still broken?

Just tested it on `master` and can confirm it is still broken.

---

Agreed, this would be useful; it was an oversight not to add a model argument when we first implemented this.

Options:

* wait until v2 to add this
* add a `model` argument and break `v1.3` - since this was only released yesterday I think we could call it a patch?
* inspect `schema_extra` and pass `model` only if the argument is expected. This would be entirely backwards compatible but involve more code

Thoughts?

I'd say option 2 or 3?

Let's do option 2. PR welcome.

---

I think we could have a `NoneType` which enforces a value to be `None`. But I think if we made `foo: None` a valid way of enforcing that a value is None, it would make a lot of logic more complicated and result in lots of easy traps for people to fall into.

I agree that we should not allow `foo: None` as a type constraint, but we could allow `foo: type(None)` or `foo: pydantic.typing.NoneType`. This would be effectively equivalent to `Literal[None]`.

Mypy, along with other type checkers, already treats `foo: None` as equivalent to `foo: Literal[None]` or `foo: NoneType`. My proposal is that Pydantic match this behaviour, even though it would require some extra logic, for the sake of consistency with the rest of the ecosystem.

`foo: type(None)` makes Mypy fail with `error: Invalid type comment or annotation. Suggestion: use type[...] instead of type(...)`, so while Pydantic should probably allow it at runtime, we'd have to recommend assigning `type(None)` to a variable and using that in annotations.

According to https://www.python.org/dev/peps/pep-0484/#using-none, `None` should be considered equivalent to `type(None)`, so this is definitely a bug.
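For illustration, a hedged sketch (assuming pydantic v1) of the `Literal[None]` behaviour that a bare `None` annotation would be expected to match:

```python
from typing import Literal

from pydantic import BaseModel, ValidationError

class Foo(BaseModel):
    # The proposal is for `must_be_none: None` to behave like this.
    must_be_none: Literal[None]

Foo(must_be_none=None)  # ok

try:
    Foo(must_be_none=1)
except ValidationError:
    print("rejected, as expected")
```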
---

Great idea. I especially like `Cache: hardlink (symlink:yes reflink:no fs:ext4)`. I'd only remove the `symlink` part - can we work on a system where symlinks are not supported?

It would be great to have the method of installation. Basically, we need everything that we ask for in bug reports.

Btw, is the Python information useful? Also, which of the DVC packages include Python (except Windows)?

Do we need `VIRTUAL_ENV: /home/ei-grad/.virtualenvs/dvc`? It contains a path which you probably don't want to share in a bug report.

Love the idea of printing more information (especially the caching type).

A few questions/options to consider:

1. Why do we need a separate command for that? It's not clear why `--version` alone is not enough.
2. Would it be better to dump more information only when we run `dvc --version` in verbose mode, and keep it clean and simple by default?
3. I think we could still support something like `dvc cache type` that by default prints the actual DVC cache type being used.

> Why do we need a separate command for that, it's not clear. Why is --version alone not enough.

@shcheklein, usually a command is more detailed; also, flags/options are used when the first argument isn't expected to be a sub-command, for example:

```bash
❯ docker version
Client:
 Version: 18.09.3-ce
 API version: 1.39
 Go version: go1.12
 Git commit: 774a1f4eee
 Built: Thu Feb 28 20:38:40 2019
 OS/Arch: linux/amd64
 Experimental: false

❯ docker --version
Docker version 18.09.3-ce, build 774a1f4eee
```

> Would it be better to dump more information only when we run dvc --version in the verbose mode? And keep it clean and simple by default.

Do you mean like `dvc --verbose --version`?

> Do we need VIRTUAL_ENV: /home/ei-grad/.virtualenvs/dvc? It contains a path which you probably don't want to share in a bug report.

Indeed, @dmpetrov; people usually mask their paths. Maybe we can have something like `virtual environment: [cygwin, conda, venv, pipenv, none]`.

---

I love the idea in general, @ei-grad :fire: ! I would add:

- `remotes: [s3, local, ssh, azure, gcs, none]`
- `scm: [git, none]`

(Usually a lot of the questions are "remote"-related, so it is cool to know at least what remote they have configured.)

Maybe we can have a `dvc info` command, resembling the `docker info` one:

```
Containers: 14
 Running: 3
 Paused: 1
 Stopped: 10
Images: 52
Server Version: 1.10.3
Storage Driver: devicemapper
 Pool Name: docker-202:2-25583803-pool
 Pool Blocksize: 65.54 kB
 Base Device Size: 10.74 GB
 Backing Filesystem: xfs
 Data file: /dev/loop0
 Metadata file: /dev/loop1
 Data Space Used: 1.68 GB
 Data Space Total: 107.4 GB
 Data Space Available: 7.548 GB
 Metadata Space Used: 2.322 MB
 Metadata Space Total: 2.147 GB
 Metadata Space Available: 2.145 GB
 Udev Sync Supported: true
 Deferred Removal Enabled: false
 Deferred Deletion Enabled: false
 Deferred Deleted Device Count: 0
 Data loop file: /var/lib/docker/devicemapper/devicemapper/data
 Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata
 Library Version: 1.02.107-RHEL7 (2015-12-01)
Execution Driver: native-0.2
Logging Driver: json-file
Plugins:
 Volume: local
 Network: null host bridge
Kernel Version: 3.10.0-327.el7.x86_64
Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo)
OSType: linux
Architecture: x86_64
CPUs: 1
Total Memory: 991.7 MiB
Name: ip-172-30-0-91.ec2.internal
ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S
Docker Root Dir: /var/lib/docker
Debug mode (client): false
Debug mode (server): false
Username: gordontheturtle
Registry: https://index.docker.io/v1/
Insecure registries:
 myinsecurehost:5000
 127.0.0.0/8
```

@mroutis looks good!
Related: https://github.com/mahmoud/boltons/blob/master/boltons/ecoutils.py

```
$ python -m boltons.ecoutils
{
  "_eco_version": "1.0.0",
  "cpu_count": 4,
  "cwd": "/home/mahmoud/projects/boltons",
  "fs_encoding": "UTF-8",
  "guid": "6b139e7bbf5ad4ed8d4063bf6235b4d2",
  "hostfqdn": "mahmoud-host",
  "hostname": "mahmoud-host",
  "linux_dist_name": "Ubuntu",
  "linux_dist_version": "14.04",
  "python": {
    "argv": "boltons/ecoutils.py",
    "bin": "/usr/bin/python",
    "build_date": "Jun 22 2015 17:58:13",
    "compiler": "GCC 4.8.2",
    "features": {
      "64bit": true,
      "expat": "expat_2.1.0",
      "ipv6": true,
      "openssl": "OpenSSL 1.0.1f 6 Jan 2014",
      "readline": true,
      "sqlite": "3.8.2",
      "threading": true,
      "tkinter": "8.6",
      "unicode_wide": true,
      "zlib": "1.2.8"
    },
    "version": "2.7.6 (default, Jun 22 2015, 17:58:13) [GCC 4.8.2]",
    "version_info": [
      2,
      7,
      6,
      "final",
      0
    ]
  },
  "time_utc": "2016-05-24 07:59:40.473140",
  "time_utc_offset": -8.0,
  "ulimit_hard": 4096,
  "ulimit_soft": 1024,
  "umask": "002",
  "uname": {
    "machine": "x86_64",
    "node": "mahmoud-host",
    "processor": "x86_64",
    "release": "3.13.0-85-generic",
    "system": "Linux",
    "version": "#129-Ubuntu SMP Thu Mar 17 20:50:15 UTC 2016"
  },
  "username": "mahmoud"
}
```

It might be more than what we need, but it is a good starting point (and it relies only on the standard library).

I am currently working on this issue. Will submit a PR after I figure it out 😄.

Fixed by #1963

Leaving open for the last part: listing whether dvc is installed from a binary or not.

@efiop where is the binary code released? Are we talking about the `.exe` file?

@algomaster99 We have a helper to determine whether or not dvc is installed from a binary. Simply use `is_binary`, something like:

```
from dvc.utils import is_binary

info += "Binary: {}".format(is_binary())
```

@efiop but where can we find the binary release? There are only `.deb`, `.exe`, `.rpm` and `.pkg` files in the releases.

@algomaster99 Yes, all of those contain binaries built by pyinstaller. Non-binary releases are the ones on homebrew formulas and pypi.

---

We could dynamically create a non-local cache in https://github.com/iterative/dvc/blob/2ee68b01b0a02d78734b8867013464571bd1b18c/dvc/cache/__init__.py#L68 . And we even have https://github.com/iterative/dvc/blob/2ee68b01b0a02d78734b8867013464571bd1b18c/dvc/cache/__init__.py#L75 already.

---

Looked at this, and I see several issues with the code, most of which also affect performance:

1. `DvcIgnore*` classes are designed around using them in `walk()`, which leads to excessively complex code handling checks for a full path.
2. All the complexity above is added to `CleanTree`, which makes it go into the `DvcIgnore*` domain. It should really look like:
   ```python
   def isfile(self, path):
       return self.tree.isfile(path) and not self.dvcignore.match_file(path)
   ```
3. The `DvcIgnore*` hierarchy forces a sequential check against all rules and regexes. This might be optimized by constructing a single structure, i.e. a big regex or a prefix tree. This is complicated by the fact that dvcignores may contain negations, though.
4. The `pathspec` library also checks against all regexes sequentially, which adds to 3.
5. **High-level issue**:
I suspect that we recheck files found via walk, so we run ignores twice. It needs to be estimated whether this is an issue.

Thank you @Suor

It seems consistent with what I experienced. The more lines I add to `.dvcignore`, the slower `dvc status` gets.

> Looked at this, and I see several issues with the code, most of which also affect performance:
>
> 1. `DvcIgnore*` classes are designed around using them in `walk()`, which leads to excessively complex code handling checks for a full path.
> 2. All the complexity above is added to `CleanTree`, which makes it go into the `DvcIgnore*` domain. It should really look like:
>    ```python
>    def isfile(self, path):
>        return self.tree.isfile(path) and not self.dvcignore.match_file(path)
>    ```
> 3. The `DvcIgnore*` hierarchy forces a sequential check against all rules and regexes. This might be optimized by constructing a single structure, i.e. a big regex or a prefix tree. This is complicated by the fact that dvcignores may contain negations, though.
> 4. The `pathspec` library also checks against all regexes sequentially, which adds to 3.
> 5. **High-level issue**: I suspect that we recheck files found via walk, so we run ignores twice. It needs to be estimated whether this is an issue.

@Suor
I also have an interest in this issue and have looked into the code. Here is my take on some of the points above:

1. For point (1), `walk()` should use information from `DvcIgnore` - for example, exiting the `.git` directory at the beginning of the iteration.
2. For point (3), according to [this article](https://www.freecodecamp.org/news/regex-was-taking-5-days-flashtext-does-it-in-15-minutes-55f04411025f/), a trie or automaton would only perform better if the number of ignored expressions were greater than several hundred.
3. For point (4), in `pathspec`:

```python
matched = False
for pattern in patterns:
    if pattern.include is not None:
        if file in pattern.match((file,)):
            matched = pattern.include
return matched
```

It should stop if any of the patterns matched the file.
@courentin
And I think this is the main reason it gets slower as the ignore list grows.

I'd like to try to solve these points this weekend.

@karajan1001
1. There is no issue with walk; an ignored dir won't be traversed. The issue is that when we need to check whether `some/path/abc/file.txt` is ignored, we need to build all of its parents and test them in an unnatural way.

> It should stop if any of the patterns matched the file.

So for the very common case that a file is not ignored, it will still match it against all of those.

> 1. There is no issue with walk; an ignored dir won't be traversed. The issue is that when we need to check whether `some/path/abc/file.txt` is ignored, we need to build all of its parents and test them in an unnatural way.

Thank you.
Does `Dvcignore` support expressions like `../*.csv` which influence files outside the current path?

Haha, underestimated the difficulty of this. I've only written the benchmark so far (https://github.com/iterative/dvc-bench/pull/30).

> 5. **High-level issue**: I suspect that we recheck files found via walk, so we run ignores twice. It needs to be estimated whether this is an issue.

@Suor, according to @courentin's call graph in #3867, it only runs once.
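On point 3 above (collapsing many ignore regexes into one structure), a hedged sketch of the big-regex idea - valid only for pattern sets without negation (`!`) rules, since negations need last-match-wins semantics and cannot be folded into a single alternation this naively:

```python
import re

def combine(regexes):
    # One alternation: a non-ignored path is rejected in a single scan
    # instead of one scan per pattern.
    return re.compile("|".join(f"(?:{r})" for r in regexes))

ignore = combine([r".*\.csv$", r"^logs/"])
assert ignore.match("data/file.csv")
assert not ignore.match("src/main.py")
```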
@efiop @pared
I have a question: how can I test unmerged changes using `dvc-bench`?

@karajan1001
Prepared a PR explaining this in the README; please take a look and review:
https://github.com/iterative/dvc-bench/pull/41

> Does Dvcignore support expressions like ../*.csv which influence files outside the current path?

@karajan1001 No, same as gitignore, it cannot look back in the tree.

> README

Thank you

@pared Should we keep this open, or are we fully done here?

@efiop sorry, autoclose. It seems to me we should leave it open. The issue is potentially still present in the case of multiple `dvcignore` files. Also, the points noted by @Suor (https://github.com/iterative/dvc/issues/3869#issuecomment-635854916) still need to be addressed.

EDIT:
#3967 addresses 2 points out of 5 (the addressed points are 3 and 4)

---

I saw this the other day on typed dicts; which python versions support this?

I'm keen if the implementation isn't too complicated.

It's supported in 3.6+ for sure.

Great, I'll make a PR soon then!

BTW, was this issue closed by accident?

---

Verbose output from 3:

```
2021-10-14 14:13:11,336 ERROR: unexpected error - Object of type datetime is not JSON serializable
------------------------------------------------------------
Traceback (most recent call last):
  File "/Users/mattseddon/PP/vscode-dvc/demo/.env/lib/python3.9/site-packages/dvc/main.py", line 55, in main
    ret = cmd.do_run()
  File "/Users/mattseddon/PP/vscode-dvc/demo/.env/lib/python3.9/site-packages/dvc/command/base.py", line 45, in do_run
    return self.run()
  File "/Users/mattseddon/PP/vscode-dvc/demo/.env/lib/python3.9/site-packages/dvc/command/experiments.py", line 509, in run
    ui.write_json(all_experiments, default=_format_json)
  File "/Users/mattseddon/PP/vscode-dvc/demo/.env/lib/python3.9/site-packages/dvc/ui/__init__.py", line 101, in write_json
    return self.write(json.dumps(data))
  File "/Users/mattseddon/.pyenv/versions/3.9.5/lib/python3.9/json/__init__.py", line 231, in dumps
    return _default_encoder.encode(obj)
  File "/Users/mattseddon/.pyenv/versions/3.9.5/lib/python3.9/json/encoder.py", line 199, in encode
    chunks = self.iterencode(o, _one_shot=True)
  File "/Users/mattseddon/.pyenv/versions/3.9.5/lib/python3.9/json/encoder.py", line 257, in iterencode
    return _iterencode(o, 0)
  File "/Users/mattseddon/.pyenv/versions/3.9.5/lib/python3.9/json/encoder.py", line 179, in default
    raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type datetime is not JSON serializable
```

Caused by https://github.com/iterative/dvc/pull/6743

Looks like we've stopped passing `default` to https://github.com/iterative/dvc/blob/0ae91300523e25e060e1313d0adb8b2eb45c4cce/dvc/ui/__init__.py#L101 , which caused the error.

I can confirm reverting to 2.7.4 fixes the issue.
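A hedged sketch of the likely one-line fix (signature simplified; the real method lives in dvc/ui/__init__.py): thread the `default` callback through to `json.dumps` again instead of dropping it.

```python
import json

def write_json(self, data, default=None):
    # Previously `default` was accepted but never forwarded, so datetime
    # values hit json's default encoder and raised a TypeError.
    return self.write(json.dumps(data, default=default))
```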
---

BTW, we already do something like that in `utils.http.iter_url()`. Something like:

```python
def exists(self, path_info):
    res = self.request("HEAD", path_info.url)
    if res.status_code == 404:
        return False
    res.raise_for_status()
    return True
```

Does this still need to be worked on? If so, can I work on this? I'll need some guidance on where to start.

@damakuno Sure! The above comment is talking about https://github.com/iterative/dvc/blob/0669d8d885307edff8971c75ee569baf6f65450c/dvc/tree/http.py#L143 .

Noted. I should add functional tests to `dvc/dvc/tests/funct/test_tree.py` to verify that it's raising on error status codes as intended, yeah? I'll try to get to know how to run the functionality first, then add my changes and create a PR.

@damakuno A simple unit test would be enough, no need for a functional one :slightly_smiling_face: It would be added to tests/unit/remote/test_http.py .
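A hedged sketch of such a unit test (the module path, `HTTPURLInfo`, and constructor usage are assumptions based on the snippet above, not verified against DVC's test suite): mock the `request` method and check the three branches of `exists()`.

```python
from unittest import mock

import pytest
import requests

def _response(status_code):
    resp = requests.Response()
    resp.status_code = status_code
    return resp

def test_exists():
    from dvc.tree.http import HTTPTree      # assumption: module path
    from dvc.path_info import HTTPURLInfo   # assumption: path_info type

    tree = HTTPTree(None, {"url": "https://example.com"})  # assumption
    info = HTTPURLInfo("https://example.com/file.txt")

    with mock.patch.object(tree, "request", return_value=_response(200)):
        assert tree.exists(info)           # 200 -> exists
    with mock.patch.object(tree, "request", return_value=_response(404)):
        assert not tree.exists(info)       # 404 -> does not exist
    with mock.patch.object(tree, "request", return_value=_response(403)):
        with pytest.raises(requests.HTTPError):
            tree.exists(info)              # anything else -> raises
```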
---

@piojanu I guess what we should do is check whether or not the data is already ignored by git, and if it is, then just not add it to yet another gitignore. Does that make sense for your scenario?

Hmm. You would have to check whether e.g. the whole directory that the file is in isn't ignored already. Then it would do fine.

@piojanu Sure. There is actually a `git check-ignore` command, which we could utilize, unless there is a more straightforward way in GitPython.

@piojanu Ok, there is actually a `repo.git.check_ignore()` supported already. So basically all we'll have to do is to add a check for it to `Git.ignore()` in https://github.com/iterative/dvc/blob/master/dvc/scm/git.py#L76 . Would you like to submit a PR for it? :slightly_smiling_face:

I'd love to, but for now I have very little time to sit down to it.

@piojanu No worries :) We'll try to get to it when we have time then. Thanks for the feedback! :slightly_smiling_face:

Hey @efiop and @piojanu. I can work on this after work tonight. Do you mind if I take this issue?

Hi @J0 ! Sure! Thanks a lot for looking into it! Let us know if you need any help and/or have any questions. :slightly_smiling_face:

Anyone working on it? Or I can pick this up with some help.

@veera83372 Not sure if @J0 is still working on this. Let's see if he replies.

@efiop I am still working on this issue. Sorry for the long delay - just opened a PR.

Is this still being worked on?

@J0 Are you still working on this?

@Aljo-Rovco I doubt it. The PR (empty?) was closed a while ago. Feel free to pick it up and contribute, and let us know if you'd need any help from us.

@shcheklein I was planning to give @J0 one more day to reply and then give this a go, but ok :)

@Aljo-Rovco Looks like @J0 is not responding, so indeed, please feel free to take this one. Thanks a lot for looking into it! 🙂

@Aljo-Rovco Are you working on this?

Hey, sorry, I was swamped. Not sure how well I'm suited to contribute, but I can try. Would it just be a check here: https://github.com/iterative/dvc/blob/master/dvc/scm/git/__init__.py#L137 ?

@Aljo-Rovco No worries. That would be the place to check, yes :slightly_smiling_face:

---

I can reproduce it. It's a bug of `dvc add`.

related: https://github.com/iterative/dvc-objects/issues/177

It is happening because we consider the filename to also be a URL, and urlparse returns `filena` as the scheme.

This should be resolved by https://github.com/iterative/dvc-objects/pull/179

> This should be resolved by https://github.com/iterative/dvc-objects/pull/179

@pmrowla This one is different and not resolved by the link. It fails at config validation:

https://github.com/iterative/dvc/blob/f68a12558aac7da25c721877e855d5c115d07261/dvc/config_schema.py#L50-L55

---

Thanks for reporting. PR welcome.

Just saw this, ugh. Will make a PR soon :)

---

Hi @nik123 !

`status` never had a `-R` flag. We need to introduce it. All the logic is there; we would simply need to add it to `dvc/command/status.py` and `dvc/repo/status.py`, which would then pass it to `collect` and `used_cache` in `dvc/repo/status.py`, similar to how push/pull/fetch/etc. do it.

---

BTW, if you actually run my example command to modify the nonexistent `name` param, it gets written to .dvc/config, which causes an error output whenever you try to change the config later:

```
ERROR: configuration error - config file error:
extra keys not allowed @ data['remote']['myremote']['name']
```

You have to manually fix the config file to fix this.

> which causes an error output whenever you try to change the config later:

(Yep, I hit the same issue here: https://github.com/iterative/dvc/issues/3552 - quite an annoying one :) )

> A better workaround is to edit the .dvc/config file, but that's relatively advanced.

not sure this is very advanced, to be honest

Not advanced in terms of skill, but you need to know that the remote config is in that file. It won't be obvious knowledge for DVC beginners. Also, for gdrive remotes, the file in .dvc/tmp/...json would also need to be changed manually, I think (if it already exists).

> Also for gdrive remotes the file in .dvc/tmp/...json would also need to be changed manually, I think (if it already exists).

it doesn't contain the remote name yet. With GDrive, multiple remotes are effectively not supported yet.

> It won't be obvious knowledge for DVC beginners.

It's a good question. I think for people familiar with the command line, git, etc., it should be more or less intuitive that there is a config somewhere.

OK. No strong opinion. I would err on the side of adding this, which I'm assuming should be pretty easy, since `git remote` allows you to do it, so you may expect this if you're used to Git. Plus the other reasons outlined. Feel free to close this though, up to you! Thanks

I'd like to look into it.
---

I can reproduce it. It's a bug of `dvc add`.

related: https://github.com/iterative/dvc-objects/issues/177

It is happening because we consider the filename also a URL, and urlparse returns the filename as the scheme.

This should be resolved by https://github.com/iterative/dvc-objects/pull/179

> This should be resolved by https://github.com/iterative/dvc-objects/pull/179

@pmrowla This one is different and not resolved by the link. It fails at config validation:

https://github.com/iterative/dvc/blob/f68a12558aac7da25c721877e855d5c115d07261/dvc/config_schema.py#L50-L55

---

thanks for reporting, PR welcome.

Just saw this, ugh. Will make a PR soon :)

---

Hi @nik123 !

`status` never had a `-R` flag. We need to introduce it. All the logic is there; we would simply need to add it to `dvc/command/status.py` and `dvc/repo/status.py`, which would then pass it to `collect` and `used_cache` in `dvc/repo/status.py`, similar to how push/pull/fetch/etc. do that.
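A hypothetical sketch of that wiring, modeled on how push/pull register `-R` (the parser and argument names are assumptions):

```python
def add_status_args(status_parser):
    # sketch: mirror how push/pull/fetch expose -R/--recursive
    status_parser.add_argument(
        "-R",
        "--recursive",
        action="store_true",
        default=False,
        help="Show status for all files in directory targets recursively.",
    )


# dvc/repo/status.py (sketch): thread the flag through to collection
def status(self, targets=None, recursive=False, **kwargs):
    stages = self.collect(targets, recursive=recursive)
    ...
```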
---

BTW if you actually run my example command to modify the nonexistent `name` param, it gets written to .dvc/config, which causes an error output whenever you try to change the config later:

```
ERROR: configuration error - config file error:
extra keys not allowed @ data['remote']['myremote']['name']
```

You have to manually fix the config file to fix this.

> which causes an error output whenever you try to change the config later:

(Yep, I hit the same issue here https://github.com/iterative/dvc/issues/3552 - quite an annoying one :) )

> A better workaround is to edit the .dvc/config file but relatively advanced.

not sure this is very advanced to be honest

Not advanced in terms of skill, but you need to know that the remote config is in that file. It won't be obvious knowledge for DVC beginners. Also for gdrive remotes the file in .dvc/tmp/...json would also need to be changed manually, I think (if it already exists).

> Also for gdrive remotes the file in .dvc/tmp/...json would also need to be changed manually, I think (if it already exists).

it doesn't contain the remote name yet. With GDrive, multiple remotes are not effectively supported yet.

> It won't be obvious knowledge for DVC beginners.

it's a good question. I think for people familiar with the command line, git, etc. it should be more or less intuitive that there is a config somewhere.

OK. No strong opinion. I would err on the side of adding this, which I'm assuming should be pretty easy, since `git remote` allows you to do it, so you may expect this if you're used to Git. Plus the other reasons outlined. Feel free to close this though, up to you! Thanks

I'd like to look into it. @jorgeorpinel How about making it `dvc remote rename`, following the similar command in Git, `git remote rename`?

Good idea, indeed that's how it works in Git. But since we already have a `modify` subcommand to change all possible remote config, I'd prefer to add a `name` config param for that.

It does not hurt to add both? Is there some modify command analog in Git?

Not really. They have a subcommand for each remote config param you can change: https://git-scm.com/docs/git-remote

`name` is ugly. `dvc remote rename` is much better.

Plus `name` complicates the argument resolution, I really don't like that and don't think it is worth bothering with. Let's just introduce `dvc remote rename` as suggested by @karajan1001 .

I have no strong opinion. Either introduce `name` to https://dvc.org/doc/command-reference/remote/modify#available-parameters-for-all-remotes (implies less maintenance both in core code and docs) or introduce a `rename` subcommand.
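A minimal sketch of what a `rename` subcommand would have to do, assuming remotes live under a `remote` section and the default remote under `core.remote` (the section names are assumptions):

```python
def rename_remote(config, old, new):
    remotes = config["remote"]
    if old not in remotes:
        raise ValueError(f"remote '{old}' doesn't exist")
    if new in remotes:
        raise ValueError(f"remote '{new}' already exists")
    remotes[new] = remotes.pop(old)
    # keep the default remote pointing at the renamed entry
    if config.get("core", {}).get("remote") == old:
        config["core"]["remote"] = new
```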
---

I'll pick this up and submit a PR soon.

> i'm not personally sold on the `file:` prefix in path (just because I don't really know why git does that, maybe there is a good reason), but the rest looks pretty good. We now have `dvc config --list`, so now we need `dvc config --list --show-origin` as the next step.

For reference, it's because git config options can come from other sources - stdin, command line, or git blobs. I don't think we would ever have anything besides `file` in DVC though.

@pmrowla Oh, didn't know that! Makes sense now. Thank you for clarifying! :pray:

Reopening to support it for a new correct `dvc config` behavior. https://github.com/iterative/dvc/pull/5184

---

Hello @mvanderlee

The config depends on the model, not the type of the field, even if this one is also a model. So you'll need to duplicate your config or add it in your own custom BaseModel.

Duplicating the config is a non-starter for us. So you're saying that there is currently no way to ensure a Model will always be serialized a certain way? Other than manually defining a json encoder and always specifying it on json.dump. Which, again, would be a non-starter for us.

I need to allow Models to be 2-way serializable.

You mentioned a custom BaseModel, do you have an example?

Sure thing!

```python
from datetime import datetime, timedelta
from pydantic import BaseModel as PydanticBaseModel
from pydantic.json import timedelta_isoformat


class BaseModel(PydanticBaseModel):
    """
    All the instances of BaseModel should serialize
    those types the same way
    """
    class Config:
        json_encoders = {
            datetime: lambda v: v.timestamp(),
            timedelta: timedelta_isoformat,
        }


class WithCustomEncoders(BaseModel):
    dt: datetime
    diff: timedelta


class ParentWithoutEncoders(BaseModel):
    child: WithCustomEncoders


m = WithCustomEncoders(dt=datetime(2032, 6, 1), diff=timedelta(hours=100))
print(m.json())
# {"dt": 1969653600.0, "diff": "P4DT4H0M0.000000S"}

p = ParentWithoutEncoders(child=m)
print(p.json())
# {"child": {"dt": 1969653600.0, "diff": "P4DT4H0M0.000000S"}}
```

The magic is possible because the `Config` of a subclass inherits from the `Config` of its parent!

You can also change the default values of `BaseConfig`, which all `Config` classes inherit from:

```python
from datetime import datetime, timedelta
from pydantic import BaseConfig, BaseModel
from pydantic.json import timedelta_isoformat

# All the instances of BaseModel should serialize those types the same way
BaseConfig.json_encoders = {
    datetime: lambda v: v.timestamp(),
    timedelta: timedelta_isoformat,
}


class WithCustomEncoders(BaseModel):
    dt: datetime
    diff: timedelta


class ParentWithoutEncoders(BaseModel):
    child: WithCustomEncoders


m = WithCustomEncoders(dt=datetime(2032, 6, 1), diff=timedelta(hours=100))
print(m.json())
# {"dt": 1969653600.0, "diff": "P4DT4H0M0.000000S"}

p = ParentWithoutEncoders(child=m)
print(p.json())
# {"child": {"dt": 1969653600.0, "diff": "P4DT4H0M0.000000S"}}
```

Hope it helps :)

Right, but now if I add a datetime field to the parent, then both would use the same format. I'm looking to be able to define how a particular Model serializes, not all data types within the context.

We can validate and deserialize per model, but not serialize per model, as far as I can tell.

```python
from datetime import datetime, timedelta
from pydantic import BaseModel, validator
from pydantic.json import timedelta_isoformat


class WithCustomEncoders(BaseModel):
    dt: datetime
    diff: timedelta

    @validator('dt', pre=True)
    def validate_dt(cls, v):
        try:
            return datetime.fromtimestamp(v)
        except Exception as e:
            raise ValueError('must be a valid timestamp', e)

    class Config:
        json_encoders = {
            datetime: lambda v: v.timestamp(),
            timedelta: timedelta_isoformat,
        }


class ParentWithoutEncoders(BaseModel):
    child: WithCustomEncoders
    p_dt: datetime

    @validator('p_dt', pre=True)
    def validate_p_dt(cls, v):
        try:
            return datetime.fromisoformat(v)
        except Exception as e:
            raise ValueError('must be valid iso string', e)

    class Config:
        json_encoders = {
            datetime: lambda v: v.isoformat()
        }


raw_m = '{"dt": 1969671600.0, "diff": "P4DT4H0M0.000000S"}'
raw_p = '{"child": {"dt": 1969671600.0, "diff": "P4DT4H0M0.000000S"}, "p_dt": "2032-06-01T00:00:00"}'

m = WithCustomEncoders.parse_raw(raw_m)
p = ParentWithoutEncoders.parse_raw(raw_p)

print(m.json())
print(p.json())

assert m.json() == raw_m
assert p.json() == raw_p

# {"dt": 1969671600.0, "diff": "P4DT4H0M0.000000S"}
# {"child": {"dt": "2032-06-01T00:00:00", "diff": 360000.0}, "p_dt": "2032-06-01T00:00:00"}

# Traceback (most recent call last):
#   File "", line 50, in
# AssertionError
```

The only way I currently see is to override the `_iter` function, i.e.:

```python
class WithCustomEncoders(BaseModel):
    dt: datetime
    diff: timedelta

    @validator('dt', pre=True)
    def validate_dt(cls, v):
        try:
            return datetime.fromtimestamp(v)
        except Exception as e:
            raise ValueError('must be a valid timestamp', e)

    class Config:
        json_encoders = {
            datetime: lambda v: v.timestamp(),
            timedelta: timedelta_isoformat,
        }

    def _iter(self, *args, **kwargs):
        for key, v in super()._iter(*args, **kwargs):
            if key == 'dt':
                yield key, v.timestamp()
            elif key == 'diff':
                yield key, timedelta_isoformat(v)
            else:
                yield key, v
```

Edit: override `_iter(...)` if it should also apply to `dict(...)`; otherwise, overriding `json(...)` in a similar fashion would work as well if it should only apply to json dumping.
I agree with @mvanderlee that it's not clear at first that the parent model that is converting to JSON establishes all the encoders and the nested models are converted using those. This appears to be because the whole object is converted into a dict and then the JSON encoder of the parent model is applied to the dictionary. An alternative might be to recursively call the JSON encoder on any pydantic BaseModel children and incorporate along the way.

@mvanderlee if you're interested in a PR, you might look at modifying https://github.com/samuelcolvin/pydantic/blob/13a5c7d676167b415080de5e6e6a74bea095b239/pydantic/main.py#L474-L509

This is likely to be much slower than the single call to the JSON encoder, so I'd recommend making it an option or even a new method?

Agreed with the other folks here that I can see why the current behavior is implemented the way it is, but there may be use-cases where you might want to serialize different components of the same type differently.

Perhaps I hadn't been looking at the right parts of the documentation, but I didn't really understand the `Config` inheritance until I stumbled upon this issue and saw [@PrettyWood's explanation](https://github.com/samuelcolvin/pydantic/issues/2277#issuecomment-764010272). Would it be possible to add some of this discussion to the documentation if it's not already there?

In the interim, based on @PrettyWood's example, what I came up with was just to create subclasses of the nested object (maybe this is overkill, would appreciate any suggestions!). Using a similar example:

```python
class DatetimeA(datetime.datetime):
    """Datetime that will be encoded as a string in pydantic Config."""
    def __new__(cls, *args, **kwargs):
        return datetime.datetime.__new__(cls, *args, **kwargs)


class DatetimeB(datetime.datetime):
    """Datetime that will be encoded as a timestamp in pydantic Config."""
    def __new__(cls, *args, **kwargs):
        return datetime.datetime.__new__(cls, *args, **kwargs)


class MyBaseModel(BaseModel):
    class Config:
        json_encoders = {
            DatetimeA: lambda v: str(v),
            DatetimeB: lambda v: v.timestamp()
        }


class TestModel(MyBaseModel):
    datetime_a: DatetimeA
    datetime_b: DatetimeB


class ParentModel(MyBaseModel):
    test_model: TestModel


test_model = TestModel(
    datetime_a=DatetimeA(2020, 1, 1),
    datetime_b=DatetimeB(2015, 1, 1)
)

parent_model = ParentModel(
    test_model=test_model
)

print(test_model.json())
print(parent_model.json())
```

Output:

```python
# '{"datetime_a": "2020-01-01 00:00:00", "datetime_b": 1420088400.0}'
# '{"test_model": {"datetime_a": "2020-01-01 00:00:00", "datetime_b": 1420088400.0}}'
```

I just ran into this issue... after a while of debugging I realized that my `Config` wasn't respected in nested models.

Although for my project it's OK to simply set a "global" encoder, I can see the value in defining how to encode a specific model within that model's Config and having that respected even when that model is nested. This is especially true when a module/library exposes public pydantic models for others to use.

---

I'm hesitant about pseudo types; without reading the docs it's not clear what this does.

What we could do is just use the function as the default/initial value? Then, unless the type of the field was `Callable`, it would call the function to generate a value.

This would not be completely backwards compatible, but I think it would be more intuitive.

This would work as well! The main idea would be to reduce the amount of boilerplate code required for this type of usage.

I think this might be better suited as a keyword argument to Schema (or Field now), or similar. That's how this functionality is handled by both attrs and dataclasses.

> I think this might be better suited as a keyword argument to Schema (or Field now), or similar. That's how this functionality is handled by both attrs and dataclasses.

agreed.

#1210 will implement `Field(default_factory=...)`.

For use of `ts: datetime = datetime.now` we should wait for v2.
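For reference, the shape of the API that #1210 introduced; this is standard pydantic v1 usage:

```python
from datetime import datetime
from uuid import uuid4

from pydantic import BaseModel, Field


class Event(BaseModel):
    # the factory is called once per instance, avoiding shared mutable defaults
    uid: str = Field(default_factory=lambda: uuid4().hex)
    ts: datetime = Field(default_factory=datetime.now)


print(Event())  # each instance gets a fresh uid and timestamp
```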
---

after a debate today with a fellow programmer I had my eyes opened to the fact that pascal case is a type of camel case... so the naming isn't wrong.

I think I'll submit a PR of the second option, as I'm sure I'm not the only one that would make use of the `to_lower_camel()` function.

---

Some more cases:

```
>>> from collections import deque
>>> from pydantic import BaseModel
>>> class D(BaseModel):
...     q: deque[int] = deque(maxlen=10)
...
>>> d = D()
>>> d
D(q=deque([], maxlen=10))

>>> d.q = deque(maxlen=25)
>>> d
D(q=deque([], maxlen=25))
```

So it seems that it is a validation issue on model creation only.

I do not know the codebase here, but potentially:

https://github.com/pydantic/pydantic/blob/f35c780cc3cf073c400ee038b8b6dd6d7b5703d9/pydantic/v1/fields.py#L954C25-L954C25

Thanks @maciekglowka for this issue 🙏

As you may know, `Pydantic V2` is out and we are not actively working on `V1`.

I've checked your first example code on V2; all of them return `D(q=deque([], maxlen=15))`.

Same for your second example. In `V2` you can have validation on assignment to a `deque` field like:

```py
from collections import deque
from typing import Deque

from typing_extensions import Annotated
from pydantic import BaseModel, ConfigDict, Field


class D(BaseModel):
    q: Annotated[Deque[int], Field(deque(maxlen=10), max_length=10)]

    model_config = ConfigDict(validate_assignment=True)


d = D()
d.q = deque([1] * 25)
```

Note: We still accept bug fixes on `V1`, but it should be an easy fix.

Thanks for checking. Yeah, I've just forked the repo. If I'm able to fix this I'm going to make a PR.

---

happy to accept a PR to fix this.

Just to clarify, is any length between 13 and 19 digits inclusive allowed? Or just 13, 16, or 19 digits?

AFAIU it's not inclusive. Did some research and this formatting [website](https://www.freeformatter.com/credit-card-number-generator-validator.html#cardFormats) came up.

Let me ask you something about implementation. We may create subtypes following faker's approach (`visa13`, `visa19`, `visa16`). In my opinion, we shouldn't replace `visa` in favor of `visa16` as it's going to break. Shall we keep the non-explicit `visa` brand? Any thoughts about this?

I'm opposed to having multiple types. We should have just one card type; people can subclass that or add validators to constrain types more if they wish.
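A sketch of that single-type approach: one card type, with stricter length rules layered on via a validator (the exact digit sets come from the formatting site referenced above and should be treated as assumptions):

```python
from pydantic import BaseModel, validator
from pydantic.types import PaymentCardNumber


class Payment(BaseModel):
    card: PaymentCardNumber

    @validator("card")
    def visa_length(cls, v):
        # assumed digit groups for Visa; adjust per the actual spec
        if v.brand == "Visa" and len(v) not in (13, 16, 19):
            raise ValueError("Visa card numbers must have 13, 16 or 19 digits")
        return v
```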
---

Thank you for reporting this, Sebastian. This one is the documentation repository. I'm moving this to the DVC repository for the core team to directly resolve it.

.dvcignore would not be created by a DVC command. You must have created that file @VildMedPap , thus `dvc destroy` lets it be. See https://dvc.org/doc/user-guide/project-structure/dvcignore-files for more info. Does that answer your question?

Are you sure about that? Please see the attached gif.

I have installed `dvc` through brew as stated in the [documentation](https://dvc.org/doc/install/macos), version `2.9.3`.

![dvc-init](https://user-images.githubusercontent.com/23236622/152838506-a0f72d8a-49c4-4a77-a7f6-5bf4e7d00948.gif)

Interesting. Should've tested before replying, but I was away from my regular environment. Let's see if there's a reason for this behavior; waiting for the @iterative/dvc team to assess. Thanks

p.s. since it looks like .dvcignore creation is now part of `dvc init`, we should update https://dvc.org/doc/user-guide/project-structure/dvcignore-files which currently states users need to create that file. Maybe https://dvc.org/doc/command-reference/init too.

---

Thanks for reporting.

Happy to accept a PR to add a proper `__eq__` method to `NameEmail`.

---

Thanks @JensHeinrich for reporting this issue.

I can confirm the issue. `partial()` returns an instance, not a class or function.

It may be fixed by accessing the function in the following line. I mean replacing `f_cls.__func__.__qualname__` with `f_cls.__func__.func.__qualname__` in the case of `partial`:

https://github.com/pydantic/pydantic/blob/73373c3e08fe5fe23e4b05f549ea34e0da6a16b7/pydantic/_internal/_decorators.py#L204

Would you like to open a PR?

I am already working on a PR, but at the moment I just add a better error.

Even if the `ref` is created in a better way (or the check just skipped by using `allow_reuse=True`), another `AttributeError` is raised [here](https://github.com/pydantic/pydantic/blob/10b8ec7dde62f5e4948a09b9114c5ac64d786ded/pydantic/class_validators.py#L170).

Also, signature creation doesn't work on it.

> Even if the `ref` is created in a better way (or the check just skipped by using `allow_reuse=True`) another `AttributeError` is raised [here](https://github.com/pydantic/pydantic/blob/10b8ec7dde62f5e4948a09b9114c5ac64d786ded/pydantic/class_validators.py#L170)

This one can also be fixed by the same change that I mentioned before.

> I am already working on a PR, but at the moment I just add a better error

I think we can make it work at least on `1.10.x`, but I am not sure about `V2`.

@samuelcolvin Do you think we should prevent using `functools.partial` as a validator?

TBH I feel `functools` is broken here and we are just adding a workaround. Even a `lambda` gets a `__qualname__`.

> TBH I feel `functools` is broken here and we are just adding a workaround. Even a `lambda` gets a `__qualname__`

agreed. Feel free to create an issue on cpython and copy me into it, I'd be interested to see what they say.

Also, can you create a PR against main to see if this is working there?

This one can be related: https://bugs.python.org/issue34475

I created an issue python/cpython#102323

I personally find the responses in the links from the last two comments reasonable; it seems we should just not assume that `__qualname__` definitely exists.

I'm good with using a default value instead, too.

I am working on a PR for v1.10 already.

I would only allow those with `reuse=True`. Would you support that @samuelcolvin @dmontagu ?
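Until `partial` objects are handled (or rejected with a clear error), a named wrapper sidesteps the missing `__qualname__`; a minimal sketch:

```python
from pydantic import BaseModel, validator


def check_max(v, *, maximum):
    if v > maximum:
        raise ValueError(f"must be <= {maximum}")
    return v


class Model(BaseModel):
    x: int

    # instead of validator("x", allow_reuse=True)(partial(check_max, maximum=10)),
    # wrap the call in a plain function so it carries a __qualname__:
    @validator("x", allow_reuse=True)
    def check_x(cls, v):
        return check_max(v, maximum=10)
```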
---

It seems that the breaking change was introduced in https://github.com/iterative/dvc/pull/6378 as part of the [2.6.0 release](https://github.com/iterative/dvc/releases/tag/2.6.0). I will take a look and try to come back with more details.

P.S. There is an [open issue](https://github.com/iterative/dvc/issues/5477) about whether we should allow using `-S` to create new params or not. It would be nice if you could expose your use case there, which seems to rely on allowing the creation of new params on the fly.

I see, thanks! Just added my own thoughts on the matter there :+1:

@skshetry Should we put an upper cap for it for now?

also, for the record: this is clearly +1 to the microbenchmarks that you've proposed before.

µbenchmarks do show a 2-4x slowdown, but it still feels theoretical. We lack benchmarks for pipelines, so it's hard to tell.

Discussed with @efiop to get rid of the dependency; we only use it in `--set-params`. We need to see what it'll take to replace it.

May be worth revisiting https://github.com/iterative/dvc/issues/4883

Benedict's merge module appears to be fairly self-contained: https://github.com/fabiocaccamo/python-benedict/blob/c98c471065ae84b4752a87f1bd63fe3987783663/benedict/core/merge.py . A possible stopgap could be to remove the benedict import and copy this module into the DVC codebase instead. I have done something similar for keypath parsing currently in #6521 (8d219dc4c4be678f8e12a57f766d282d13997443).

I think I'm more inclined to work around benedict than to replace it. We should roll back this particular change that it does on import:

https://github.com/fabiocaccamo/python-benedict/blob/c98c471065ae84b4752a87f1bd63fe3987783663/benedict/dicts/__init__.py#L283-L285

Is it worth having a dependency that requires workarounds if we only use ~20 lines of that dependency? Considering it causes other side effects/bugs like #6378 #6476.

@mattlbeck, we still need to unflatten those, and that comes from [`keylist_util.py`](https://github.com/fabiocaccamo/python-benedict/blob/c98c471065ae84b4752a87f1bd63fe3987783663/benedict/dicts/keylist/keylist_util.py).

I have not been able to work on this, and will not be able to work on it for a few days at least. The easier way is to work around benedict; that should help us move ahead with #6476 and #6521.
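For scale, the behaviour in question boils down to a recursive dict merge, which fits in a few self-contained lines (a sketch, not benedict's exact semantics):

```python
def merge(dest, src):
    """Recursively merge `src` into `dest`, overwriting scalars."""
    for key, value in src.items():
        if isinstance(dest.get(key), dict) and isinstance(value, dict):
            merge(dest[key], value)
        else:
            dest[key] = value
    return dest


params = {"train": {"lr": 0.01, "epochs": 10}}
merge(params, {"train": {"lr": 0.1}})
assert params == {"train": {"lr": 0.1, "epochs": 10}}
```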
---

Thanks @mattseddon! Could you describe how it impacts VS Code and how important it is?

I see that it's inconsistent with other commands, but both behaviors seem initially reasonable to me, and since I have never seen a user complain about it, I would only prioritize it based on your needs.

This broke the workflow of getting set up with a new project/repository. That is something that we want to cover with the extension, so it is fairly important to us.

Here are a couple of thoughts that I've had:

1. If the error is expected (which I think is reasonable) then we should not get an "unexpected error".
2. If we have to have a commit for some commands to work, then maybe `dvc init` should generate that commit.

---

I'll take it. @efiop If you have any hints to make the work more efficient, I'll be grateful :)

---

It's happening to other users as well (discussion with Benjamin on Discord). Symptoms are very similar - zsh (a regular one), PATH is modified. Not `conda`; virtualenv is being used. OS - Mac.

@shcheklein in their case, is DVC installed in the "parent" Python environment, or in a separate virtualenv? The problem might lie in something that has to do with the parent/child relationship.

Also, did they use Homebrew to install Python? Brew is another common factor here, and more likely to cause problems than Zsh, since Brew does its own layer of symlinking.

My admittedly convoluted setup:

```
Linuxbrew
├─ Pyenv
│  └─ Conda                      <- PATH is broken when DVC is installed here
│     └─ Active conda environment <- PATH is OK when DVC is installed here
└─ Python
   └─ Pipx-managed Virtualenv     <- PATH is OK when DVC is installed here
```

@gwerbin Thanks! I've asked Benjamin to take a look and share more details.

> @shcheklein in their case, is DVC installed in the "parent" Python environment, or in a separate virtualenv? The problem might lie in something that has to do with the parent/child relationship.

I have tried two setups, and both fail in the sense that:

1. The error `ImportError: No module named pandas` is returned.
2. `dvc run -o test 'which python > test'` outputs `/usr/local/bin/python` in the `test` file, where it should point to `python` in the virtualenv.

Setup 1

> ```
> Homebrew
> ├─ DVC (`/usr/local/bin/dvc`)
> └─ Virtualenv + Python (`/usr/local/bin/{python,virtualenv}`)
>    └─ Active virtualenv environment
>       └─ Pandas
> ```

Setup 2

> ```
> Homebrew
> └─ Virtualenv + Python (`/usr/local/bin/{python,virtualenv}`)
>    └─ Active virtualenv environment
>       ├─ DVC
>       └─ Pandas
> ```

> Also, did they use Homebrew to install Python? Brew is another common factor here, and more likely to cause problems than Zsh, since Brew does its own layer of symlinking.

Yes, Python was installed by Homebrew. (FYI: the Python interpreter that comes with the latest version of macOS (Mojave, version 10.14.6) is 2.7.10 and is 4.5 years old. I figure most people using Python on macOS will have shadowed this outdated version with a more recent one.)

@shcheklein and @efiop asked me to share the output of a few commands on the Discord channels, and perhaps it helps if I share it here as well.

```
> echo $SHELL
> dvc run -f test.dvc 'echo $SHELL'
> ls -la $SHELL
> file $SHELL
/bin/zsh
'test.dvc' already exists. Do you wish to run the command and overwrite it? [y/n] y
Running command:
	echo $SHELL
/bin/zsh
Saving information to 'test.dvc'.

To track the changes with git, run:

	git add test.dvc
-rwxr-xr-x 1 root wheel 610240 May 4 09:05 /bin/zsh
/bin/zsh: Mach-O 64-bit executable x86_64
> cat test.dvc
cmd: echo $SHELL
md5: ee3b44e50705d557b7aa3eef74821f74
```

I wish I could help out more, but my knowledge of Python environments and DVC internals is very limited. However, let me know if I can help you out with further information and I'm happy to provide it.

For the record: I am able to reproduce https://github.com/iterative/dvc/issues/2506#issue-494639954 even on the Linux machine.

In my case `which dvc` shows a pyenv shim, which has something like:

```
exec "/home/efiop/.pyenv/libexec/pyenv" exec "$program" "$@"
```

in it, which is the thing that adds some stuff on top of the base env, as we can see:

```
➜ dvc-test git:(755) ✗ /home/efiop/.pyenv/libexec/pyenv exec --help
Usage: pyenv exec <command> [arg1 arg2...]

Runs an executable by first preparing PATH so that the selected Python
version's `bin' directory is at the front.

For example, if the currently selected Python version is 2.7.6:
  pyenv exec pip install -rrequirements.txt

is equivalent to:
  PATH="$PYENV_ROOT/versions/2.7.6/bin:$PATH" pip install -rrequirements.txt
```

It would be nice if pyenv left something like a `PATH_ORIG` env var, so that we could use it later. This would be similar to how pyinstaller leaves VAR_ORIG if it changes it, e.g. LD_LIBRARY_PATH. Looking for possible and viable automatic workarounds. Might have to suggest this to pyenv later though, to make it straightforward for everyone.

Interesting detail: our good friend @AlJohri has run into it before, even without using dvc: https://github.com/pyenv/pyenv/issues/985 🙂
---

Yep, this is definitely a bug.

In general, I think it should be possible to subclass even concrete generic models. I'm hopeful it's not too hard to implement.

@dmontagu is the expert on `GenericModel`, any idea?

I'm hitting this as well. It looks like the value of "concrete" is calculated by running:

```
concrete = all(not _is_typevar(v) for v in concrete_type_hints.values())
```

and `Optional[T]` does not qualify as a typevar even though it should. The same goes for any more nested objects (including nested generic models).

Most of it can probably be fixed by modifying `_is_typevar` to detect nested type vars.
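A sketch of that fix: walk type arguments recursively instead of testing only the top level (`typing.get_args` needs Python 3.8+; how this plugs into `_is_typevar` is an assumption):

```python
from typing import Optional, TypeVar, get_args

T = TypeVar("T")


def contains_typevar(tp) -> bool:
    # a bare TypeVar, or any TypeVar buried in Optional[...], List[...], etc.
    if isinstance(tp, TypeVar):
        return True
    return any(contains_typevar(arg) for arg in get_args(tp))


assert contains_typevar(Optional[T])         # now detected as non-concrete
assert not contains_typevar(Optional[int])   # genuinely concrete

# concrete = not any(contains_typevar(v) for v in concrete_type_hints.values())
```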
---

Thanks for reporting this, @jaredsampson !

Including the stack trace:

```
ERROR: file/directory 'data' is specified as an output in more than one stage: data.dvc
 data.dvc
------------------------------------------------------------
Traceback (most recent call last):
  File "/dvc/command/add.py", line 25, in run
    fname=self.args.file,
  File "/dvc/repo/__init__.py", line 36, in wrapper
    ret = f(repo, *args, **kwargs)
  File "/dvc/repo/scm_context.py", line 4, in run
    result = method(repo, *args, **kw)
  File "/dvc/repo/add.py", line 50, in add
    repo.check_modified_graph(stages)
  File "/dvc/repo/__init__.py", line 177, in check_modified_graph
    self._collect_graph(self.stages + new_stages)
  File "/dvc/repo/__init__.py", line 327, in _collect_graph
    raise OutputDuplicationError(str(out), stages)
dvc.exceptions.OutputDuplicationError: file/directory 'data' is specified as an output in more than one stage: data.dvc
 data.dvc
------------------------------------------------------------
```

I get the same behavior with a flat file foo.txt instead of `data/foo` as well:

```
dvc init --no-scm
mkdir data
echo "foo" > foo.txt
dvc add foo.txt
echo "bar" > bar.txt
dvc unprotect foo.txt
echo "change" > foo.txt
dvc add foo.txt bar.txt
```

yields

```
WARNING: Output 'foo.txt' of 'foo.txt.dvc' changed because it is 'modified'
100%|██████████|Add 2.00/2.00 [00:01<00:00, 1.51file/s]
ERROR: file/directory 'foo.txt' is specified as an output in more than one stage: foo.txt.dvc
 foo.txt.dvc
```

However, reversing the order of the files in the last command, everything works fine:

```
$ dvc add foo.txt bar.txt
WARNING: Output 'foo.txt' of 'foo.txt.dvc' changed because it is 'modified'
100% Add 2.00/2.00 [00:01<00:00, 1.47file/s]
```

Thanks for the additional info, @jaredsampson !

The problem seems to be related to the way we collect the stages.

```
#!/bin/bash

set -e
set -x

rm -rf myrepo
mkdir myrepo
cd myrepo

dvc init --no-scm

mkdir data

echo "foo" > foo.txt
dvc add foo.txt

echo "bar" > bar.txt
dvc unprotect foo.txt

echo "change" > foo.txt
dvc add foo.txt bar.txt
```

doesn't reproduce it for me 🙁 I suspect that the original issue might be caused by us not deduping the targets in dvc/repo/add.py, but I haven't looked into it closely enough.

@efiop , change the order of the last call and it should work:

```diff
- dvc add foo.txt bar.txt
+ dvc add bar.txt foo.txt
```

If someone is willing to give it a try, here's a test that you can play with:

```diff
diff --git a/tests/func/test_add.py b/tests/func/test_add.py
index 6d51f233..7328c84d 100644
--- a/tests/func/test_add.py
+++ b/tests/func/test_add.py
@@ -24,7 +24,7 @@ from dvc.system import System
 from dvc.utils import file_md5
 from dvc.utils import LARGE_DIR_SIZE
 from dvc.utils import relpath
-from dvc.utils.compat import range
+from dvc.utils.compat import range, pathlib
 from dvc.utils.stage import load_stage_file
 from tests.basic_env import TestDvc
 from tests.utils import get_gitignore_content
@@ -649,3 +649,10 @@ def test_escape_gitignore_entries(git, dvc_repo, repo_dir):
     dvc_repo.add(fname)
 
     assert ignored_fname in get_gitignore_content()
+
+
+def test_adding_several_files_after_one_has_been_modified(dvc_repo):
+    # https://github.com/iterative/dvc/issues/2886
+    dvc_repo.add('foo')
+    pathlib.Path('foo').write_text('change')
+    dvc_repo.add(['bar', 'foo'])
```

Might need revisiting the graph building process :grimacing:

@iterative/engineering it seems like an important bug. Should we make it p0?

can confirm that the problem still exists on master:

```
#!/bin/bash

rm -rf repo
mkdir repo

pushd repo
git init --quiet
dvc init -q

set -ex

mkdir data

echo foo>>data/foo
dvc add -q data

echo bar>>bar
echo change>>data/foo

dvc add bar data
```

Ok, so a little investigation helped me to narrow down the issue. The bug shows up when we are re-`add`-ing an existing DVC-tracked output, but only after calling `dvc.stages` first. For example (latest version I checked with: 0.86.1):

```
def test_re_add(tmp_dir, scm, dvc):
    tmp_dir.dvc_gen({"file": "file content"})

    tmp_dir.gen({"file": "modified content"})
    dvc.stages
    dvc.add(["file"])
```

We will get the same OutputDuplicationError.

1. Why is this happening?
`repo.stages` is a cached property, so the repo is storing `file.dvc`. The problem is that when we are re-`add`-ing some file, the chain of calls is `repo.add` -> `repo._create_stages` -> `Stage.create`, which detects that `file.dvc` exists and removes it, so it can be created again.

So now we don't have `file.dvc` because it was removed by `Stage.create`, we have `file.dvc` in memory as the "new" stage that we are creating right now, and because `repo`'s stages were not invalidated, we also have `repo.stages == ['file.dvc']`. That is why, when we check the modified graph [here](https://github.com/iterative/dvc/blob/a90d7009f52fa03b45ad5c1d22943cebee387b01/dvc/repo/__init__.py#L173) during `add`, we have `self.stages == ['file.dvc']` and `new_stages == ['file.dvc']`, while actually `self.stages` should not return it (since it was removed by `Stage.create`).

**TLDR**
When re-`add`-ing, we remove the stage file, but we don't invalidate the cached `repo.stages` and end up with `repo._collect_graph(['file.dvc', 'file.dvc'])`, which has to fail.

Solution: invalidate `repo.stages` when removing the stage file.
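A sketch of that solution. Both funcy's and functools' `cached_property` store the computed value in the instance `__dict__`, so dropping the key forces recollection on next access (where exactly to call this, e.g. from `Stage.create` after the stage file is removed, is an assumption):

```python
def reset_stages_cache(repo):
    # next access to repo.stages recollects stage files from disk
    repo.__dict__.pop("stages", None)
```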
---

Yeah, this seems like a good feature request to me.

I've never tried nesting BaseSettings classes, but it seems totally reasonable to me and I don't see any reason it shouldn't work like this. (But maybe @samuelcolvin does?)

Could you produce a failing test case or two as a starting point for implementation work?

Sounds good to me.

Do you want to use dot notation, e.g. `db.host`?

Also, should this work for dictionaries too?

This isn't required for v1 since it would be entirely backwards compatible.

It's great to hear!

I think the only logic that needs to change is the merging of the `self._build_environ()` and `init_kwargs` dictionaries, so I don't see why it wouldn't work for dictionaries.

Should I start a merge request with the changes and tests, or would you like to tackle this one yourselves?

@idmitrievsky It would be great if you could start a pull request for this. Your approach to the logic sounds right to me (though admittedly there could be some edge cases I'm not considering).
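For reference, later pydantic v1 releases cover this via `env_nested_delimiter`; a sketch of the behaviour being asked for here:

```python
from pydantic import BaseModel, BaseSettings


class DB(BaseModel):
    host: str = "localhost"
    port: int = 5432


class Settings(BaseSettings):
    db: DB = DB()

    class Config:
        # split env var names on "__" to reach nested fields
        env_nested_delimiter = "__"


# DB__HOST=10.0.0.1 python app.py  ->  Settings().db.host == "10.0.0.1"
```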
---

hi, @anjapago , could you try to see if this quick and dirty workaround helps:

`pip install git+https://github.com/shcheklein/dvc@workaround-3473`

(use it this way: `dvc get --rev=feb20gem https://github.com/iterative/blog yarn.lock` - always specify `--rev`)

More context from the discussion: a full clone takes ~20 min, which makes commands like `dvc get`, `dvc list`, `dvc import` as well as `dvc.api` effectively unusable.

Right. Ping myself :)

---

@gcoter Sounds good! Let's do that!

Unfortunately, we don't have the capacity for this right now as well :slightly_frowning_face: So it might have to wait until someone has time for it.

@gcoter Btw, I think you can already do that using your ssh config:

```
Host example.com
    ForwardAgent no
```

Hi @efiop, thanks for your answer! I tried to modify `ForwardAgent` but it doesn't seem to work in my case...

I will try to make the PR myself.

---

@mroutis could you give more context please?

Sure, @shcheklein , let me edit the description :sweat_smile: .

@mroutis @Suor Is this still relevant? There was some dedup optimization in brancher.

@efiop , I'd say it is a priority 3 and more like an enhancement.

```bash
git init
dvc init
dvc run -m foo 'echo 100 > foo'
git add -A
git commit -m "metrics foo"
```

```console
$ dvc metrics show --all-branches
working tree:
	foo: 100
master:
	foo: 100
```

So, the problem is that `working tree` is being returned when you can tell that there's no difference between the current branch and the working tree (because _the HEAD is clean_ -- not sure if I'm using the correct terminology :sweat_smile: )

Another approach is adding "working tree" with a comma, same as duplicate branches or tags.

Votes split on priority and whether to include it in the next sprint: +1s from @mroutis and @Suor, -1 from @efiop and @pared.

Goes as a bonus for next sprint if we go well.

Just discovered that `--all-branches` and friends are a no-op for `dvc status` when neither the `cloud` nor `remote` option is specified. So:

- this issue is only about `metrics show`, other commands don't care
- we should fail or show a warning on a no-op option use.

@Suor Indeed, it only affects status if `-c` is also specified. So we need a group of mutually required (or whatever it is called) flags there.

---

this is not a bug but a feature request.

---

@efiop , `destroy` should work as `rm -rf .dvc`?

I was thinking about adding the `--all` option that will work as `rm -rf .dvc *.dvc Dvcfile`.

Is that the expected behavior?

Currently `destroy` removes both the .dvc/ directory and *.dvc files with their outputs. Starting from 1.0, it should leave outputs intact (in the symlink case, it should remove symlinks and replace them with copies), so that the user at least has his data back.

We should use `unprotect` as described in https://github.com/iterative/dvc/issues/1802
---

Good idea, @mattlbeck!

I'm curious if you see benefits beyond ease of use for doing this over inserting the output directly as a code block like:

````
echo '```' >> report.md
dvc exp show >> report.md
echo '```' >> report.md
````

@dberenbaum Honestly hadn't thought of placing it inside a code block. Presumably this only works with `--no-pager`?

Without having properly tested this, the only additional benefit of a `--show-md` I can think of is that it would look a bit nicer.

Good to know. Would you be interested in either trying that workaround and letting us know how it works, or else contributing the `--show-md` option?

No problem, I will either submit a PR or close this ticket depending on the outcome.

Thank you! One more thought: the leftmost column condenses multiple items of info (compare to the csv output) that might be hard to show in the same way in markdown.

Also, no need to close the ticket. This is at least a pattern we should explicitly support for cml and other CI needs.

Raw markdown would be more likely to benefit from some features of the platform where the table is being rendered (i.e. in Jupyter you would get rows highlighted on hover).

Taking the GitHub example below, I kind of like the `--show-md` format better.

Example "code block workaround" (this renders awfully in the VSCode markdown extension preview, btw):

```
┏━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Experiment ┃ Created      ┃ loss    ┃ accuracy ┃ train.batch_size ┃ train.hidden_units ┃ train.dropout ┃ train.num_epochs ┃ train.lr ┃ train.conv_activation ┃
┡━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━┩
│ workspace  │ -            │ 0.26484 │ 0.9038   │ 128              │ 64                 │ 0.4           │ 10               │ 0.001    │ relu                  │
│ main       │ Sep 14, 2021 │ 0.26484 │ 0.9038   │ 128              │ 64                 │ 0.4           │ 10               │ 0.001    │ relu                  │
│ 5bcd44f    │ Sep 01, 2021 │ 0.25026 │ 0.9095   │ 128              │ 64                 │ 0.4           │ 10               │ 0.001    │ relu                  │
│ b06a6ba    │ Aug 31, 2021 │ 0.25026 │ 0.9095   │ 128              │ 64                 │ 0.4           │ 10               │ 0.001    │ relu                  │
│ d34fd8c    │ Aug 30, 2021 │ 0.30741 │ 0.8929   │ 128              │ 64                 │ 0.4           │ 10               │ 0.01     │ relu                  │
│ 02b68b7    │ Aug 29, 2021 │ 0.44604 │ 0.8483   │ 128              │ 64                 │ 0.4           │ 10               │ 0.01     │ relu                  │
│ 5337519    │ Aug 28, 2021 │ -       │ -        │ -                │ -                  │ -             │ -                │ -        │ -                     │
└────────────┴──────────────┴─────────┴──────────┴──────────────────┴────────────────────┴───────────────┴──────────────────┴──────────┴───────────────────────┘
```

Pure markdown:

| Experiment | Created | loss | accuracy | train.batch_size | train.hidden_units | train.dropout | train.num_epochs | train.lr | train.conv_activation |
|---|---|---|---|---|---|---|---|---|---|
| workspace | - | 0.26484 | 0.9038 | 128 | 64 | 0.4 | 10 | 0.001 | relu |
| main | Sep 14, 2021 | 0.26484 | 0.9038 | 128 | 64 | 0.4 | 10 | 0.001 | relu |
| 5bcd44f | Sep 01, 2021 | 0.25026 | 0.9095 | 128 | 64 | 0.4 | 10 | 0.001 | relu |
| b06a6ba | Aug 31, 2021 | 0.25026 | 0.9095 | 128 | 64 | 0.4 | 10 | 0.001 | relu |
| d34fd8c | Aug 30, 2021 | 0.30741 | 0.8929 | 128 | 64 | 0.4 | 10 | 0.01 | relu |
| 02b68b7 | Aug 29, 2021 | 0.44604 | 0.8483 | 128 | 64 | 0.4 | 10 | 0.01 | relu |
| 5337519 | Aug 28, 2021 | - | - | - | - | - | - | - | - |
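A rough sketch of the conversion `--show-md` implies: same cells, pipe-table layout (handling of the condensed leftmost column is left out, per the caveat above):

```python
def to_markdown(header, rows):
    lines = [
        "| " + " | ".join(header) + " |",
        "| " + " | ".join("---" for _ in header) + " |",
    ]
    lines += ["| " + " | ".join(str(cell) for cell in row) + " |" for row in rows]
    return "\n".join(lines)


print(to_markdown(["Experiment", "loss"], [["main", 0.26484], ["5bcd44f", 0.25026]]))
```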
---

I'll look; if this requires breaking changes it can go in v1, if not I guess it could go in v0.32.2.

Weirdly, it doesn't happen if you remove `Optional[]`.

---

Would it make sense for `type="dl"` to generate:

```yaml
stages:
  train:
    cmd: python train.py
    deps:
    - train.py
    metrics:
    - dvclive.json:
        cache: false
    plots:
    - dvclive/scalars:
        cache: false
```

Where `dvclive` would be replaced by the value passed to `--live` (if any)?

cc @dberenbaum @skshetry

@daavoo Would you be up to contribute this change?

> @daavoo Would you be up to contribute this change?

If you all agree on the proposed workaround, yes.

Sorry, I think I was too quick to approve this. Looking at it now, it seems more transparent to:

* Make `--live` optional and off by default for both `default` and `dl`. This makes `--type dl` seem less useful since it only differs by including checkpoints, but I don't think it's bad that we are unifying and simplifying.
* Make `--live` independent from whether to include `checkpoints: true`.
* Make `--live` mutually exclusive with `metrics` and `plots`.
* If `--live` and `--interactive` are both present, don't ask for `metrics` or `plots`.

This hopefully makes it a simple convenience to replace `metrics` and `plots` easily when using dvclive.

Again, sorry for not catching this earlier.

Thoughts?

---

Hi @nat212

Your minimal example is not complete. Can you add what is actually run? (I'm surprised your `CloudConfig` is not a `BaseSettings`.)

Sure, I've updated it to include a little more detail. The example in the [Settings management docs](https://pydantic-docs.helpmanual.io/usage/settings/) uses a `BaseModel` as the submodel, so that is why it is not a `BaseSettings`. Would it be better to make it a `BaseSettings`?

If you look at the example, you can see that the `BaseModel` doesn't contain any secret, and it's explained in the comment that you need to stringify a dict to override it. If you want to write only your `client_secret` into `/run/secrets/client_secret`, `CloudConfig` needs to be a `BaseSettings` and have the right `Config` declared.

Yes, the issue is that when using a JSON string as an environment variable, it sets the `cloud_config` model successfully. So the environment variable should get detected by the `Settings` model, parsed as a JSON string, and used to set the `cloud_config` model. However, if I have a secret called `cloud_config` with the same JSON string, the secret just gets read as plain text, and the `Settings` model complains that it has not received a dict.

---

> Another interesting point: `dvc status` does exactly this - it analyzes actual changes.

Exactly. We decided not to cover this case in the first `diff` implementation and created #3255.

Duplicate of #3255

I would not consider this a duplicate of that larger discussion (basically, what status should be doing). They are relevant indeed, but if we more or less understand what we expect from `dvc diff`, let's focus on this one? And I would vote for showing the changes (uncommitted in the DVC sense).

Reopening this for now; let's keep both since it's an important issue to discuss and solve if needed. If we are lucky enough to close both at once, that would be a great outcome.

> Seems like maybe this issue and #3385 could be merged? I'm just seeing lots of diff issues, may be hard to manage them.

---

This should get resolved by something similar to `dvc exp gc ...`, which will garbage collect un-needed experiments and their related cache artifacts. This needs to be implemented before release, but is not a priority issue yet.

Tasks:

* [x] `dvc exp gc` for garbage collecting experiments
* [ ] `dvc gc -e/--exp` to preserve used experiment cache during regular gc

Used object collection for experiments will depend on the exp sharing feature, since collection will require walking the custom exp refs rather than `.dvc/experiments` branches. So the remaining `gc` task is on hold until exp sharing is done.

---

This isn't a bug, but a feature request.

```py
class Config:
    fields = {"b": {"required": False}}
```

Wouldn't work on a normal model.

I think what you want is better support for the `field()` function from dataclasses. If so, please describe exactly what parts of `field()` usage you'd like to support in pydantic. Is it just the default in schema?

Thanks for clarifying! Only handling `default*` in schema is what we need to support. However, it would be nice to have a stable interface for `.__pydantic_model__`. I'm not sure if this might change.

`__pydantic_model__` won't change, we should document it.

It would be wonderful if you could create a PR for that. :smile:

i'm going for it
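For concreteness, the `field()` usage under discussion: defaults declared via `dataclasses.field` surfacing in the schema exposed through `__pydantic_model__` (a sketch of the desired behaviour, not what shipped at the time):

```python
import dataclasses

from pydantic.dataclasses import dataclass


@dataclass
class User:
    name: str = "John Doe"
    tags: list = dataclasses.field(default_factory=list)


print(User.__pydantic_model__.schema())
# the "name" property carries "default": "John Doe"; after the fix,
# default_factory-based fields should get sensible schema output as well
```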
---

Good points, @skshetry.

As you pointed out, none of the silent examples implemented now (`metrics/params/plots`) make much sense. Interested to hear if there's a use case for being silent for those, since their purpose is to show output.

Otherwise, it seems like we might consistently apply the first meaning to all of the commands. As you again noted, we can always add `--silent` if necessary.

@gcoter, could you please share more information about the docker container, and why it is running on `ascii` encoding?

`print` has the same behaviour with ASCII encoding if you try to print other Unicode characters. If it's in your control, I'd suggest you use a `utf-8` encoding or set the `PYTHONIOENCODING=utf-8` envvar inside docker.

Here, the change is simple as we can detect them easily, but it might not always be the case (considering we have some big changes coming in the CLI/UI). What if the metrics themselves are in Unicode?

Hi @skshetry, `PYTHONIOENCODING=utf-8` did solve the problem! I don't know why ascii is being used by default, but I will use your trick :slightly_smiling_face: Thank you

I observed this error in one of our Katacoda containers as well. Related: iterative/katacoda-scenarios#55

This happens when `LANG` is not set to some `UTF-8` locale:

```
~/example-get-started$ locale
LANG=
LANGUAGE=
LC_CTYPE="POSIX"
LC_NUMERIC="POSIX"
LC_TIME="POSIX"
LC_COLLATE="POSIX"
LC_MONETARY="POSIX"
LC_MESSAGES="POSIX"
LC_PAPER="POSIX"
LC_NAME="POSIX"
LC_ADDRESS="POSIX"
LC_TELEPHONE="POSIX"
LC_MEASUREMENT="POSIX"
LC_IDENTIFICATION="POSIX"
LC_ALL=
```

when I set

```
export LANG=C.UTF-8
```

I don't get this error.

You can get possible values with

```
locale -a
```

and add all locales on a Debian/Ubuntu container with

```
apt install locales-all
```

This may be a more general fix than setting `PYTHONIOENCODING`.

You can also set these in your `Dockerfile`:

```
...
ENV LANG C.UTF-8
ENV LC_ALL C.UTF-8
...
```

@gcoter

Thank you @iesahin for the fix, I think it is indeed more general :slightly_smiling_face:

---

Seems like we don't pass the config options to the real filesystem, working on a PR:

https://github.com/iterative/dvc/blob/378486dbf53271ec894b4380f60ec02dc3351516/dvc/fs/http.py#L105-L111

---

I don't think we want more config options unless absolutely required.

The solution here is to allow `schema_extra` to be a function which can mutate the schema, #889. Happy to accept a PR for that.
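As it later landed in pydantic v1, `schema_extra` can be a callable that mutates the generated schema in place; a sketch:

```python
from pydantic import BaseModel


class Person(BaseModel):
    name: str
    age: int = 0

    class Config:
        @staticmethod
        def schema_extra(schema, model):
            # mutate the schema in place, e.g. trim auto-added defaults
            for prop in schema.get("properties", {}).values():
                prop.pop("default", None)


print(Person.schema())
```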
---

Reproducible with

```py
def test_show_with_preexisting_tags(tmp_dir, scm):
    tmp_dir.gen("foo", "foo")
    scm.add("foo")
    scm.commit("init")
    scm.tag("no-metrics")

    tmp_dir.gen({"subdir": {}})
    subrepo_dir = tmp_dir / "subdir"
    with subrepo_dir.chdir():
        dvc = Repo.init(subdir=True)
        scm.commit("init dvc")

    subrepo_dir.gen("metrics.yaml", "foo: 1")
    dvc.run(metrics=["metrics.yaml"], single_stage=True)

    scm.add(
        [
            str(subrepo_dir / "metrics.yaml"),
            str(subrepo_dir / "metrics.yaml.dvc"),
        ]
    )
    scm.commit("init metrics")
    scm.tag("v1")

    assert dvc.metrics.show(all_tags=True) == {
        "working tree": {"subdir/metrics.yaml": {"foo": 1}},
        "v1": {"subdir/metrics.yaml": {"foo": 1}},
    }
```

Also worth noting it is only reproducible if the tag was for a commit before DVC was initialized AND DVC was initialized with `subdir=True`. I cannot reproduce the issue if DVC is initialized with `subdir=False`.

---

I'll pick this up and submit a PR soon.

---

@pared what do you think?

@Suor It makes sense to me, so we kind of want to detach data loading from data processing and from template handling, and "join" those elements at the latest moment possible, so that the viewer can have more control over the plot creation process?

@pared yes, I want this to be separate, to be able to store some raw data, probably in a normalized intermediate format, and then finish the processing later.

The thing is that parsing a repo and generating plots are completely separate for me. Plots are generated on user request, and so the chosen commits can't be known beforehand.

@Suor that makes total sense

Another thing we should consider - stop treating templates as strings; these are JSONs, i.e. structured data. Now their serialization is baked in, so I need to encode data, parse and re-encode it to serve it the way I want. Otherwise I need to serve it as a string with all those unneeded spaces and newlines.

@Suor that is a good point, but there was a reason why we are not treating them as JSON: it is possible now to create a custom template that is not JSON (for example, an HTML page filled with placeholders for data) and still be able to work with that kind of template.

I agree that JSON would be much more convenient to work with. We could actually get rid of anchors, because the Vega template structure is quite constant, so data will always be under `json["data"]["values"]` and so on...

I think we could use @dmpetrov's input on this one.

---

Try `m = Model(self_="some URL")` with the rest of the code as is (from the with-alias example).

@dmontagu the problem is deserialization. I get a json body in a request with `{"self": "someurl"}` in it, and I can't create a model instance without pre-parsing the body to replace "self" with "self_".

If pydantic had a way to "preload" like marshmallow does, it might help?

Would #624 help?

@samuelcolvin would having a load alias give you a different function to call that didn't already have `self` as a parameter? I don't see that discussed in the linked issue.

Given how frequently the specific field name `self` seems to be desired (I think it has meaning in certain json schema specs), might it make sense to rename `self` to `__self__` or something else equally unlikely to cause conflicts in `BaseModel.__init__` (and perhaps other methods expecting field names as kwargs)? Unless the approach of calling `Model(**data)` to initialize a model becomes discouraged, it feels unfortunate to me to have to change the model initialization code just to support a reasonably-named field (reasonable in a JSON context, rather than a python context anyway).

@samuelcolvin not sure whether splitting out load and dump aliases changes the fact.

Seems to me that being able to avoid `Model(**data)` to initialize a model avoids all sorts of possible issues with name clashes. Moving this to a 'marshmallow' style `load(...)` has other advantages for pre-formatting data etc., but I don't really want to suggest any specific approach here, not knowing the history and roadmap of the project well enough; rather just asking if there is a way to handle this.

Regardless, as @dmontagu points out, 'self' and even 'kwargs' should be possible to specify in a schema.

---

I can probably look at this this weekend... I don't think many people are using generics in 3.9 yet.

Yes, you're right, but supporting generic types seems to be rational. According to [PEP 585](https://www.python.org/dev/peps/pep-0585/), the legacy typing.List, typing.Dict, ... are **deprecated** since python3.9 and will be removed from python.

---

@gthb Thanks for reporting this! Looks like we are not using the post-checkout parameters that are being passed to us by git and are indicating whether we are checking out a specific file or a branch/tag/etc. https://git-scm.com/docs/githooks#_post_checkout We need to adjust our hook accordingly.

@gthb Looks like the third parameter is just what we need. You could try modifying your git hook by hand to see if that will do the job for you. If it does, please let us know, and maybe consider submitting a PR, we really appreciate your contributions :slightly_smiling_face: If it doesn't work, please ping us as well.
---

I co-discovered this issue. 100% true.

Hi and thanks for reporting.

`BaseSettings` inherits from `BaseModel` with other defaults for the config, including `validate_all`, which is set to True by default. As explained in the [warning section in the doc](https://pydantic-docs.helpmanual.io/usage/models/#field-with-dynamic-default-value), to validate the default value of a field, we actually need to call the `default_factory`.

I guess you could find a workaround in your default factory or set `validate_all = False`:

```py
class A(BaseSettings, validate_all=False):
    pass
```

We could also allow people to not validate values with `default_factory`.

Hope it helps in the meantime.

@PrettyWood

Thank you, that works for now. It's still kind of strange though; I believe it should be possible to validate all values (including the default ones) on initialization. Might be better to hide this functionality under some option (`validate_on_init`), too.

---

Would you accept this change as a PR?

PR sounds good.

I guess there's no harm in including this in v1 since it's so small.

Given this breaks a lot of fastapi tests (presumably mostly just requiring modifications to generated schemas), @tiangolo do you have any issues with this change?

Okay, let's wait until v1.1.

except I guess if it's a breaking change, it would make more sense to include it in v1.

@tiangolo what do you think?

@tiangolo would you like me to also update the title field in the schema unit tests to ensure that's all it is?

I'm pro this change; it seems like a no-brainer, especially if it's a breaking change, to include it in v1.

@skewty please fix the PR and I'll merge.

---

It seems there are two problems. The first one is what you mentioned in the issue, which apparently can be solved by using the legacy types in `typing`. I believe that is problematic, since PEP 585 was supposed to make list and List equal (at least from my understanding).

```python
from typing import TypedDict, List

from pydantic import BaseModel


class A(TypedDict):
    a: List['A']


class B(BaseModel):
    a: A
```

The problem with this code now becomes a maximum recursion depth. But that's a completely different problem.

```
...
  File "/home/luccas/Projetos/pydantic/pydantic/typing.py", line 355, in is_literal_type
    return Literal is not None and get_origin(type_) is Literal
  File "/home/luccas/Projetos/pydantic/pydantic/typing.py", line 124, in get_origin
    return _typing_get_origin(tp) or getattr(tp, '__origin__', None)
  File "/usr/lib/python3.9/typing.py", line 1510, in get_origin
    if isinstance(tp, (_BaseGenericAlias, GenericAlias)):
RecursionError: maximum recursion depth exceeded in __instancecheck__
```

This, for example, works:

```python
from typing import TypedDict, List
from pydantic import BaseModel


class A(TypedDict):
    foo: int
    bar: str


class B(BaseModel):
    a: List['A']
```

Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`:

```
pydantic version: 1.8.2
pydantic compiled: False
install path: /home/luccas/Projetos/pydantic/pydantic
python version: 3.9.7 (default, Aug 31 2021, 13:28:12) [GCC 11.1.0]
platform: Linux-5.4.148-1-MANJARO-x86_64-with-glibc2.33
optional deps. installed: ['devtools', 'dotenv', 'email-validator', 'typing-extensions']
```

---

@friggog Please post the verbose log for that command, e.g. `dvc fetch -v`.
Also, please post `dvc doctor` output.\n```\r\nDVC version: 2.0.3 (pip)\r\n---------------------------------\r\nPlatform: Python 3.7.9 on Windows-10-10.0.14393-SP0\r\nSupports: azure, hdfs, http, https, ssh\r\nCache types: \r\nCaches: local\r\nRemotes: azure\r\nWorkspace directory: NTFS on D:\\\r\nRepo: dvc (subdir), git\r\n2021-03-05 16:23:34,513 DEBUG: Check for update is enabled.\r\n2021-03-05 16:23:34,540 DEBUG: Trying to spawn '['daemon', '-q', 'updater']'\r\n2021-03-05 16:23:34,590 DEBUG: Spawned '['daemon', '-q', 'updater']'\r\n2021-03-05 16:23:34,619 DEBUG: Checking if stage 'FILE_1' is in 'dvc.yaml'\r\n2021-03-05 16:23:35,958 DEBUG: Checking if stage 'FILE_2' is in 'dvc.yaml'\r\n2021-03-05 16:23:35,959 DEBUG: Checking if stage 'FILE_3' is in 'dvc.yaml'\r\n2021-03-05 16:23:35,960 DEBUG: Checking if stage 'DIR\\FILE_4' is in 'dvc.yaml'\r\n2021-03-05 16:23:36,759 DEBUG: Preparing to download data from 'azure://CONTAINER_NAME'\r\n2021-03-05 16:23:36,760 DEBUG: Preparing to collect status from azure://CONTAINER_NAME\r\n2021-03-05 16:23:36,760 DEBUG: Collecting information from local cache...\r\n2021-03-05 16:23:36,760 DEBUG: Collecting information from remote cache...\r\n2021-03-05 16:23:36,761 DEBUG: Matched '0' indexed hashes\r\n2021-03-05 16:23:36,761 DEBUG: Querying 1 hashes via object_exists\r\n2021-03-05 16:23:36,765 DEBUG: failed to pull cache for 'DIR'\r\n2021-03-05 16:23:35,960 DEBUG: Checking if stage 'DIR\\FILE_5' is in 'dvc.yaml'\r\n2021-03-05 16:23:36,759 DEBUG: Preparing to download data from 'azure://CONTAINER_NAME'\r\n2021-03-05 16:23:36,760 DEBUG: Preparing to collect status from azure://CONTAINER_NAME\r\n2021-03-05 16:23:36,760 DEBUG: Collecting information from local cache...\r\n2021-03-05 16:23:36,760 DEBUG: Collecting information from remote cache...\r\n2021-03-05 16:23:36,761 DEBUG: Matched '0' indexed hashes\r\n2021-03-05 16:23:36,761 DEBUG: Querying 1 hashes via object_exists\r\n2021-03-05 16:23:36,765 DEBUG: failed to pull cache for 'DIR'\r\n\r\n... 
```
-----------------------------------------------------------
2021-03-05 16:24:56,944 ERROR: failed to download 'azure://CONTAINER_NAME/e4/deb4d2426b0b5fd5919319e3eca7e7' to '.dvc\cache\e4\deb4d2426b0b5fd5919319e3eca7e7' - There is no current event loop in thread 'ThreadPoolExecutor-127_2'.
------------------------------------------------------------
Traceback (most recent call last):
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\dvc\remote\base.py", line 35, in wrapper
    func(from_info, to_info, *args, **kwargs)
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\dvc\fs\base.py", line 280, in download
    return self._download_file(from_info, to_info, name, no_progress_bar,)
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\dvc\fs\base.py", line 332, in _download_file
    from_info, tmp_file, name=name, no_progress_bar=no_progress_bar
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\dvc\fs\fsspec_wrapper.py", line 104, in _download
    with self.open(from_info, "rb") as fobj:
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\dvc\fs\fsspec_wrapper.py", line 48, in open
    return self.fs.open(self._with_bucket(path_info), mode=mode)
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\fsspec\spec.py", line 943, in open
    **kwargs,
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\adlfs\spec.py", line 1445, in _open
    **kwargs,
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\adlfs\spec.py", line 1509, in __init__
    fs.service_client.get_container_client(self.container_name)
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\azure\storage\blob\aio\_blob_service_client_async.py", line 591, in get_container_client
    key_resolver_function=self.key_resolver_function, loop=self._loop)
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\azure\storage\blob\aio\_container_client_async.py", line 118, in __init__
    **kwargs)
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\azure\storage\blob\_container_client.py", line 149, in __init__
    super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\azure\storage\blob\_shared\base_client.py", line 114, in __init__
    self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs)
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\azure\storage\blob\_shared\base_client_async.py", line 71, in _create_pipeline
    self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\azure\core\pipeline\policies\_authentication_async.py", line 24, in __init__
    self._lock = asyncio.Lock()
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\asyncio\locks.py", line 161, in __init__
    self._loop = events.get_event_loop()
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\asyncio\events.py", line 644, in get_event_loop
    % threading.current_thread().name)
RuntimeError: There is no current event loop in thread 'ThreadPoolExecutor-127_2'.

... many times ...
```
```
------------------------------------------------------------
2021-03-05 16:24:56,993 ERROR: failed to fetch data from the cloud - 53 files failed to download
------------------------------------------------------------
Traceback (most recent call last):
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\dvc\command\data_sync.py", line 89, in run
    run_cache=self.args.run_cache,
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\dvc\repo\__init__.py", line 49, in wrapper
    return f(repo, *args, **kwargs)
  File "c:\hostedtoolcache\windows\python\3.7.9\x64\lib\site-packages\dvc\repo\fetch.py", line 77, in fetch
    raise DownloadError(failed)
dvc.exceptions.DownloadError: 53 files failed to download
------------------------------------------------------------
2021-03-05 16:24:56,997 DEBUG: Analytics is enabled.
2021-03-05 16:24:57,000 DEBUG: Trying to spawn '['daemon', '-q', 'analytics', 'C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\tmpy3i7nnwh']'
2021-03-05 16:24:57,052 DEBUG: Spawned '['daemon', '-q', 'analytics', 'C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\tmpy3i7nnwh']'
Unclosed client session
client_session:
```

I'm pretty sure it is only files that are within a directory that is tracked (rather than the file itself being tracked) that are failing.

Also tried a fresh clone on my local machine and I'm getting auth errors with DefaultAzureCredentials :(
Update: this problem persists on the latest version (2.0.13) and happens both locally and on CI.

Here is the traceback (after fixing DefaultAzureCredential, see #5725):

```
Traceback (most recent call last):
  File "python_lib\site-packages\adlfs\spec.py", line 483, in do_connect
    for cred in creds
  File "python_lib\site-packages\adlfs\spec.py", line 484, in
    if cred is not None
  File "python_lib\site-packages\azure\storage\blob\aio\_blob_service_client_async.py", line 118, in __init__
    **kwargs)
  File "python_lib\site-packages\azure\storage\blob\_blob_service_client.py", line 126, in __init__
    super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
  File "python_lib\site-packages\azure\storage\blob\_shared\base_client.py", line 114, in __init__
    self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs)
  File "python_lib\site-packages\azure\storage\blob\_shared\base_client_async.py", line 71, in _create_pipeline
    self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
  File "python_lib\site-packages\azure\core\pipeline\policies\_authentication_async.py", line 24, in __init__
    self._lock = asyncio.Lock()
  File "python_lib\asyncio\locks.py", line 161, in __init__
    self._loop = events.get_event_loop()
  File "python_lib\asyncio\events.py", line 644, in get_event_loop
    % threading.current_thread().name)
RuntimeError: There is no current event loop in thread 'ThreadPoolExecutor-0_0'.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "python_lib\site-packages\dvc\main.py", line 55, in main
    ret = cmd.run()
  File "python_lib\site-packages\dvc\command\data_sync.py", line 40, in run
    glob=self.args.glob,
  File "python_lib\site-packages\dvc\repo\__init__.py", line 49, in wrapper
    return f(repo, *args, **kwargs)
  File "python_lib\site-packages\dvc\repo\pull.py", line 38, in pull
    run_cache=run_cache,
  File "python_lib\site-packages\dvc\repo\__init__.py", line 49, in wrapper
    return f(repo, *args, **kwargs)
  File "python_lib\site-packages\dvc\repo\fetch.py", line 63, in fetch
    used, jobs, remote=remote, show_checksums=show_checksums,
  File "python_lib\site-packages\dvc\data_cloud.py", line 92, in pull
    show_checksums=show_checksums,
  File "python_lib\site-packages\dvc\remote\base.py", line 56, in wrapper
    return f(obj, *args, **kwargs)
  File "python_lib\site-packages\dvc\remote\base.py", line 491, in pull
    download=True,
  File "python_lib\site-packages\dvc\remote\base.py", line 328, in _process
    download=download,
  File "python_lib\site-packages\dvc\remote\base.py", line 176, in _status
    md5s, jobs=jobs, name=str(self.fs.path_info)
  File "python_lib\site-packages\dvc\remote\base.py", line 132, in hashes_exist
    return indexed_hashes + self.odb.hashes_exist(list(hashes), **kwargs)
  File "python_lib\site-packages\dvc\objects\db\base.py", line 380, in hashes_exist
    remote_hashes = self.list_hashes_exists(hashes, jobs, name)
  File "python_lib\site-packages\dvc\objects\db\base.py", line 338, in list_hashes_exists
    ret = list(itertools.compress(hashes, in_remote))
  File "python_lib\concurrent\futures\_base.py", line 598, in result_iterator
    yield fs.pop().result()
  File "python_lib\concurrent\futures\_base.py", line 435, in result
    return self.__get_result()
  File "python_lib\concurrent\futures\_base.py", line 384, in __get_result
    raise self._exception
  File "python_lib\concurrent\futures\thread.py", line 57, in run
    result = self.fn(*self.args, **self.kwargs)
  File "python_lib\site-packages\dvc\objects\db\base.py", line 329, in exists_with_progress
    ret = self.fs.exists(path_info)
  File "python_lib\site-packages\dvc\fs\fsspec_wrapper.py", line 94, in exists
    return self.fs.exists(self._with_bucket(path_info))
  File "python_lib\site-packages\funcy\objects.py", line 50, in __get__
    return prop.__get__(instance, type)
  File "python_lib\site-packages\funcy\objects.py", line 28, in __get__
    res = instance.__dict__[self.fget.__name__] = self.fget(instance)
  File "python_lib\site-packages\dvc\fs\azure.py", line 114, in fs
    file_system = AzureBlobFileSystem(**self.fs_args)
  File "python_lib\site-packages\fsspec\spec.py", line 66, in __call__
    obj = super().__call__(*args, **kwargs)
  File "python_lib\site-packages\adlfs\spec.py", line 403, in __init__
    self.do_connect()
  File "python_lib\site-packages\adlfs\spec.py", line 500, in do_connect
    raise ValueError(f"unable to connect to account for {e}")
ValueError: unable to connect to account for There is no current event loop in thread 'ThreadPoolExecutor-0_0'.
```

```
2021-03-30 08:55:25,226 DEBUG: Version info for developers:
DVC version: 2.0.13 (pip)
---------------------------------
Platform: Python 3.7.10 on Windows-10-10.0.19041-SP0
Supports: azure, http, https
Cache types: hardlink
Cache directory: NTFS on C:\
Caches: local
Remotes: azure
Workspace directory: NTFS on C:\
Repo: dvc (subdir), git
```
After some further investigation, this issue seems to arise because `adlfs` uses `azure.identity.aio` and `azure.storage.blob.aio`, which both use `asyncio`. Because DVC uses `ThreadPoolExecutor` at various points (e.g. [here](https://github.com/iterative/dvc/blob/a1aab35f997a1f9175f945f8266cf4038a5715b8/dvc/remote/base.py#L364) and [here](https://github.com/iterative/dvc/blob/a1aab35f997a1f9175f945f8266cf4038a5715b8/dvc/fs/base.py#L306)) without any handling for `asyncio`, this causes the failures above, where no event loop is available to `asyncio` within threads of the `ThreadPoolExecutor`. Disabling uses of `ThreadPoolExecutor` within DVC results in pulls from Azure succeeding.

@isidentical @efiop I'm a little confused why nobody else seems to have this issue? Also not sure what the best path for a reliable fix is; I have no experience with `asyncio`...

@friggog, could you please try installing adlfs from master? It could have been fixed upstream.

```console
$ pip install git+https://github.com/dask/adlfs.git
```

Nope, same error @skshetry

@isidentical have you managed to repro this at all?

> @isidentical have you managed to repro this at all?

No, unfortunately, I haven't encountered this error. If you can share your specific conditions (which login method you are using, which operation exactly is failing, etc.), I'll try to work on it.

I'm using DefaultAzureCredential with cached Azure CLI credentials (run `az login` before DVC commands) and also ServicePrincipleAuth on DevOps CI. Both have the same error with `dvc fetch some_files`.

I've reproduced your issue @friggog by using `DefaultCredentials`. The fix is simple, though it is a bit of a gray area whether we should fix it in dvc itself or in adlfs, so I've opened a question on the adlfs upstream: dask/adlfs#218

@isidentical In the interest of making this functional for DVC users, can we fix it here now, even if it's not the perfect solution? Then, if it eventually gets fixed in adlfs, the alterations can be removed from DVC.
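For context on the failure mode, here is a minimal, standalone sketch (not DVC's actual fix): on Python 3.7, `asyncio.Lock()` looks up the current event loop, and worker threads of a `ThreadPoolExecutor` have none, so one workaround is to install a fresh loop per worker thread.

```python
import asyncio
from concurrent.futures import ThreadPoolExecutor


def ensure_event_loop() -> None:
    # asyncio.get_event_loop() raises RuntimeError in a non-main thread
    # with no loop set ("There is no current event loop in thread ...")
    try:
        asyncio.get_event_loop()
    except RuntimeError:
        asyncio.set_event_loop(asyncio.new_event_loop())


def worker():
    ensure_event_loop()
    # on Python 3.7 this is the call that blows up inside azure's
    # AsyncBearerTokenCredentialPolicy when the thread has no loop
    return asyncio.Lock()


with ThreadPoolExecutor(max_workers=2) as pool:
    print(pool.submit(worker).result())
```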
---

Can you post the output of `dvc doctor`? It looks like you are probably using the latest version based on the traceback, but it's helpful for us to know for sure. Also, DVC no longer uses Paramiko (the underlying backend is now `asyncssh`).

```
(pyenv3.8.6) joncrall@namek:~$ dvc doctor
DVC version: 2.6.4 (pip)
---------------------------------
Platform: Python 3.8.6 on Linux-5.4.0-80-generic-x86_64-with-glibc2.2.5
Supports:
	azure (adlfs = 2021.7.1, knack = 0.8.2, azure-identity = 1.6.0),
	gdrive (pydrive2 = 1.9.1),
	gs (gcsfs = 2021.7.0),
	hdfs (pyarrow = 5.0.0),
	webhdfs (hdfs = 2.5.8),
	http (requests = 2.25.1),
	https (requests = 2.25.1),
	s3 (s3fs = 2021.7.0, boto3 = 1.17.49),
	ssh (sshfs = 2021.7.1),
	oss (ossfs = 2021.7.5),
	webdav (webdav4 = 0.9.0),
	webdavs (webdav4 = 0.9.0)
```

Anything out of the ordinary? I did attempt a `pip install dvc -U` before I reported.

CC @isidentical

> The main issue being that client_keys was set to /home/joncrall/.ssh/id_personal_ed25519, which is incorrect based on the config.

> identityfile ~/.ssh/id_kitware_ed25519

Am I missing something, since the `client_keys` is identical to your `IdentityFile` setting?

Looks like the `IdentityFile` entry is `id_kitware_...` but asyncssh is using `id_personal_...`.

Now I see the difference, thanks @pmrowla!

@Erotemic I assume that you have another config that overrides the setting; can you also share that?

I tried a couple of variations:

```
Host example.com
    User ubuntu
    HostName 1.2.3.4
    Port 1234
    IdentityFile ~/.ssh/not_default.key

Host kitware.com
    identityfile ~/.ssh/id_personal_ed25519

Host bigbox bigbox.kitware.com
    HostName bigbox.kitware.com
    Port 22
    User jon.crall
    identityfile ~/.ssh/id_kitware_ed25519
    ForwardX11 yes
    ForwardX11Trusted yes
```

and every time it successfully parsed the correct identityfile:

```
>>> from sshfs.config import parse_config
>>>
>>> print(parse_config(host='bigbox.kitware.com')._options)
{'Hostname': 'bigbox.kitware.com', 'Port': 22, 'User': 'jon.crall', 'IdentityFile': ['~/.ssh/id_kitware_ed25519'], 'ForwardX11Trusted': True}
```

I don't have another config file that I know of (but if you know how I could find a potential override if one exists, let me know; I only see the one config).

However, I think I see the problem. At the top of the config I have a separate default IdentityFile entry. I think that is what is causing the issue. (The rest of the lines in the SSH file are all Host entries that point at different machines.)

```
IdentityFile ~/.ssh/id_personal_ed25519

Host bigbox bigbox.kitware.com
    HostName bigbox.kitware.com
    Port 22
    User jon.crall
    identityfile ~/.ssh/id_kitware_ed25519
    ForwardX11 yes
    ForwardX11Trusted yes
```

Using your example I see this:

```
>>> from sshfs.config import parse_config
>>>
>>> print(parse_config(host='bigbox.kitware.com')._options)
```

which results in

```
{'IdentityFile': ['~/.ssh/id_personal_ed25519',
                  '~/.ssh/id_kitware_ed25519'],
 'Hostname': 'bigbox.kitware.com',
 'Port': 22,
 'User': 'jon.crall',
 'ForwardX11Trusted': True}
```

Both of the keys show up in the IdentityFile list! I'm wondering if there is something in asyncssh or dvc that is using `config['IdentityFile'][0]` and ignoring the second entry?

Yes! That seems like the issue:

https://github.com/iterative/dvc/blob/e5cf9b203df01f13e5568c745906d74bc131235c/dvc/fs/ssh.py#L71-L75

I am not really sure about the reason we chose this behavior (cc: @efiop); it might simply be something that stuck around from the initial SSH implementation (this behavior has been kept since paramiko, AFAIK):

https://github.com/iterative/dvc/blob/5e4de841d12168647bc8c7ec464dfb8a7800ed10/dvc/fs/ssh/__init__.py#L120-L121

So with ssh itself, the behavior is to try each identity file until one works. I think this issue has some potential action items that could improve DVC:

* Write out the user_ssh_config as a DEBUG-level log, so this is clear when `-v` is given (I think this is necessary)
* If a permission error occurs, try the next identity file if another one is available (not sure how contentious this is, but this change would make it work more like ssh itself; see the sketch after this thread)

@isidentical Yeah, it just stuck around since https://github.com/iterative/dvc/pull/1965. We could totally consider changing it, even at the cost of backward compatibility, since this is a rather advanced feature and it is already broken. Up to you guys.
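A hypothetical sketch of the second action item above (this is not DVC's code; as far as I know, asyncssh's `client_keys` option accepts a sequence of key paths and tries each in order, which mirrors OpenSSH's behaviour):

```python
from typing import List

import asyncssh


async def open_conn(host: str, port: int, user: str, identity_files: List[str]):
    # pass every IdentityFile entry instead of only the first one,
    # e.g. parse_config(host=...)._options["IdentityFile"], so asyncssh
    # can fall back to the next key when one is rejected
    return await asyncssh.connect(
        host, port=port, username=user, client_keys=identity_files
    )
```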
---

See https://github.com/iterative/dvc/issues/7462#issuecomment-1070954669

---

I think we can expand as needed by users.

SVG? Maybe TIFF? Uncompressed bitmaps? I'd ask CV people.

I think that SVG is perhaps a good first addition, as plotting libraries can output it and the files are often a bit smaller than PNGs. Having at least one vector format and one raster format would be a good goal. Probably not a first priority, but it would be nice!

Unless I am missing something, this would only need to update:

https://github.com/iterative/dvc/blob/d7152e360e4770c54645e84ca9b690389d60dd07/dvc/repo/plots/__init__.py#L556
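A guess at the shape of that change (the actual code at the referenced line may differ; the constant and helper names here are illustrative):

```python
import os

# hypothetical: extend whatever set of recognized raster extensions exists
# at the referenced line with a vector format such as SVG
SUPPORTED_IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".svg"}


def is_image(path: str) -> bool:
    return os.path.splitext(path)[1].lower() in SUPPORTED_IMAGE_EXTENSIONS
```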
---

Repro script:

```bash
#!/bin/bash

set -exu
pushd $TMPDIR

wsp=test_wspace
rep=test_repo

rm -rf $wsp && mkdir $wsp && pushd $wsp
main=$(pwd)

mkdir $rep && pushd $rep

mkdir data
git init
dvc init

echo data >> data/file
dvc add data/file
dvc remote add -d str $main/storage

dvc push

git add -A
git commit -am "init"

rm -rf data/file .dvc/cache

ln -s data data_link
dvc pull -v data_link/file.dvc
```

This use case used to work. Seems like https://github.com/iterative/dvc/commit/26008f155901aa590b1a809452f951b2c5d7c6c3 introduced the regression.

cc @dtrifiro

---

Hi @Persedes!

Wow, that is awful. Investigating right now. Thank you for reporting it! 🙏

Indeed, the issue is caused by dvc being confused by the trailing `/` in `models/mlr/production/`. A workaround for now is to not include the trailing slash. Will send a fix soon...

Reproduction script:

```bash
#!/bin/bash

rm -rf erepo repo git_repo
mkdir erepo repo git_repo

maindir=$(pwd)
set -ex
pushd git_repo
git init --bare --quiet
popd

pushd erepo
git init --quiet
git remote add origin $maindir/git_repo
dvc init -q

mkdir models
echo model >> models/1

dvc add models
git add -A
git commit -am "add stuff"

git push origin master
rm models/1
rm -rf .dvc/cache

popd

pushd repo
dvc get $maindir/erepo models/
```

Expected behavior:
- do not remove the whole `repo`, for starters
- we should probably rethink the output in this case; the problem seems related to some recent UI issues: #2839, #2691

I have found another oddity, which might be related to the bug above: `dvc get` keeps nesting outputs.

The first `get` pulls a folder with two tsv files. The second one creates a `prepared` folder inside that one. I would have expected `dvc get` to either do nothing or overwrite the existing tsv files when pulling from a different branch/commit.

```
# start in an empty folder
$ ls

# pull output from a folder and define the output path
$ dvc get https://github.com/iterative/example-get-started data/prepared -o dvc_files/
$ ls dvc_files/
test.tsv train.tsv

# running it again creates a prepared folder inside of the defined output_path
$ dvc get https://github.com/iterative/example-get-started data/prepared -o dvc_files/
# prepared gets nested inside of dvc_files
$ ls dvc_files/
prepared test.tsv train.tsv
```

@efiop, are you working on this one?

> https://github.com/iterative/dvc/issues/3105#issuecomment-573854955

Nesting only happens once; this is due to `resolve_output`. Can't tell what the original intention behind these lines was:

https://github.com/iterative/dvc/blob/fe635a5040c9d593cd1bdef3fa31f4df7af85259/dvc/utils/__init__.py#L336-L344

@efiop?

@pared, I was trying to write a test based on your reproduction script, but didn't understand why you were `git push`ing. Could you explain it for me?

By the way, here's what I have so far:

```python
def test_error(tmp_dir, dvc, erepo_dir):
    with tempfile.TemporaryDirectory() as remote:
        git.Repo.init(remote)

        with erepo_dir.chdir():
            erepo_dir.scm_gen("dir/file", "text", commit="create file")
            erepo_dir.dvc_add("dir")
            origin = erepo_dir.scm.repo.create_remote("origin", remote)
            origin.push()

        os.remove(fspath(erepo_dir / "dir" / "file"))
        shutil.rmtree(fspath(erepo_dir / ".dvc" / "cache"))

        dvc.get(erepo_dir, "dir")
        assert git.Repo(remote)
```

It passes, so I'm not sure if I'm translating it correctly :confused:

@mroutis, `git push` is unnecessary; dunno how it got there. Removing it still makes the bug reproducible.

[EDIT] @mroutis, I modified your test a little bit:

```python
def test_error(tmp_dir, dvc, erepo_dir):
    with erepo_dir.chdir():
        erepo_dir.gen("dir/file", "text")  # just gen here, not scm_gen
        erepo_dir.dvc_add("dir", commit="add dir")

    os.remove(fspath(erepo_dir / "dir" / "file"))
    shutil.rmtree(fspath(erepo_dir / ".dvc" / "cache"))

    try:
        with mock.patch("dvc.prompt.confirm", return_value=True):
            dvc.get(fspath(erepo_dir), "dir/")  # note that we get "dir/" not "dir"
    except Exception:
        pass
    finally:
        assert tmp_dir.exists()
```

@pared, great! Thanks for clearing it up)

@pared, I still don't get it. Tests are passing on my side; I'm losing it on this one :sweat_smile:

@mroutis even the one that I provided?
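A small stdlib illustration of why the trailing slash discussed in this thread trips up path handling (the path is hypothetical): the basename of a path that ends in `/` is empty unless the path is normalized first.

```python
import os

path = "models/mlr/production/"
print(os.path.basename(path))                    # '' -> looks like "no name"
print(os.path.basename(os.path.normpath(path)))  # 'production'
```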
---

We could simply set `no_traverse = False` in `RemoteHTTP.__init__` permanently.

Throw an exception if no_traverse is set for HTTP. Add a test for this.

@efiop, does it make sense to raise an exception? What about defaulting to `traverse`? Just setting `self.no_traverse = False` in `RemoteHTTP.__init__` should work as intended.

@mroutis But in that case setting the option in config will have no effect, and the user won't even know that it is not working.

@efiop, as @pared noticed, `list_cache_paths` is not implemented for `RemoteHTTP` (neither is `gc`). I'm wondering how HTTP was used as a cache :thinking:

@mroutis it is used through no_traverse and has been that way always. gc is not implemented because HTTP doesn't support write; HTTP has always been used as read-only (i.e. pull/fetch only).

@efiop, but what are you going to pull if you haven't `push`ed anything?

@mroutis You could push through s3 and then pull through http, as we've been doing in dvc for a long time already: https://github.com/iterative/dvc/blob/master/.dvc/config#L2

Yep, the same as we do with `example-get-started`: `dvc remote add --local s3` to push, and `dvc remote add http` to then expose it to the public as a read-only data repo.
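A minimal sketch combining both suggestions above, hard-coding the attribute and rejecting the config option (class and exception names are illustrative stand-ins, not DVC's actual code):

```python
class ConfigError(Exception):
    pass


class HTTPRemote:
    """Illustrative stand-in for RemoteHTTP, not DVC's actual class."""

    # hard-coded rather than read from config, so a user-set value in
    # .dvc/config can never be silently ignored
    no_traverse = False

    def __init__(self, config: dict):
        if "no_traverse" in config:
            raise ConfigError("no_traverse is not supported for HTTP remotes")
```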
---

Please try master, since this might have been fixed/changed by #582.

Same on master. The wording has indeed changed, but there is still a difference between the given and permitted values, where the former shows a single item and the latter shows the whole list...

Thanks for checking. PR welcome.

---

Hello @snazzyfox. Unfortunately we currently have some [custom logic](https://github.com/samuelcolvin/pydantic/blob/master/pydantic/schema.py#L820) which doesn't support `FrozenSet`, as the origin is `frozenset`. We could probably just add an `if issubclass(origin, frozenset) ...` and add the immutability in `conset`, or duplicate some logic to have a `confrozenset`.

Thanks for taking a look! This totally makes sense.

I think whether we create `confrozenset` or `conset(frozen=True)` depends on whether we also want to duplicate the `ConstrainedSet` class. The code for frozen sets will most probably be almost identical, just with a different base class. If there's a way we can update/reuse the existing class, adding an immutable option to `conset` would make more sense.

---

Related to https://github.com/iterative/dvc/issues/3182, as we will also need to download .dir cache files and parse them for this functionality.

This will be a very useful change, +1

Closing this in favor of https://github.com/iterative/dvc/issues/4107, since it is newer. I have a WIP PR that should be ready later today.

---

I recently ran into this as well, and it certainly was unexpected to me. I ended up writing a small patch to the metaclass which removes overridden validators post hoc. I post it below in case it is useful to others.

```python
from pydantic import BaseModel
from pydantic.main import ModelMetaclass

def remove_overridden_validators(model: BaseModel) -> BaseModel:
    """
    Currently a Pydantic bug prevents subclasses from overriding root validators.
    (see https://github.com/samuelcolvin/pydantic/issues/1895)
    This function inspects a Pydantic model and removes overridden
    root validators based on their `__name__`.
    Assumes that the latest entries in `__pre_root_validators__` and
    `__post_root_validators__` are earliest in the MRO, which seems to be
    the case.
    """
    model.__pre_root_validators__ = list(
        {validator.__name__: validator
         for validator in model.__pre_root_validators__
         }.values())
    model.__post_root_validators__ = list(
        {validator.__name__: (skip_on_failure, validator)
         for skip_on_failure, validator in model.__post_root_validators__
         }.values())
    return model

class PatchedModelMetaclass(ModelMetaclass):
    def __new__(*args, **kwargs):
        model = ModelMetaclass.__new__(*args, **kwargs)
        return remove_overridden_validators(model)
```

The following bit of code tests that it works as expected:

```python
from pydantic import BaseModel, root_validator

class A(BaseModel):
# class A(BaseModel, metaclass=PatchedModelMetaclass):
    a: int

    @root_validator(pre=True)
    def pre_root(cls, values):
        print("pre rootA")
        return values

    @root_validator(pre=False)
    def post_root(cls, values):
        print("post rootA")
        return values

class B(A):
    @root_validator(pre=True)
    def pre_root(cls, values):
        print("pre rootB")
        return values

    @root_validator(pre=False)
    def post_root(cls, values):
        print("post rootB")
        return values

# This prints only from the validators in B if PatchedModelMetaclass is used
B(a=1)
```

---

https://github.com/iterative/dvc/pull/6369/checks?check_run_id=3218847399

The issue seems to be due to the latest version update of `python-benedict` (i.e. `0.24.1`). I looked into its internals, and the problem is that the way we expect it to keep references to the dictionary we update is wrong (though this is how `benedict` is supposed to work; it cannot easily keep pointers to an empty dictionary). I'll think about creating an issue later, but I think it's wise for us to just skip empty dictionaries and only update non-empty ones.

See https://github.com/fabiocaccamo/python-benedict/issues/57#issuecomment-890490982 for more information.
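A rough standalone sketch of that suggestion with plain dicts (not DVC's or benedict's actual code): when applying an update, skip empty sub-dicts and only recurse into non-empty ones.

```python
def merge_update(dest: dict, src: dict) -> dict:
    for key, value in src.items():
        if isinstance(value, dict):
            if not value:
                continue  # skip empty dictionaries, as suggested above
            merge_update(dest.setdefault(key, {}), value)
        else:
            dest[key] = value
    return dest


print(merge_update({"a": {"x": 1}}, {"a": {"y": 2}, "b": {}}))
# {'a': {'x': 1, 'y': 2}}
```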
---

I think I've noticed this before, with slightly different resulting issues, but thought it might just be a limitation of Python. Thanks for breaking this down.

I think there doesn't need to be a config value; parent annotations should probably just be used if they exist. If you want to override, you should probably have to do it explicitly. I'm not sure how hard this would be to implement, though.

---

> Not sure whether that check is as simple as checking existence of .dvc

Hm, probably not that, because git-ignored stuff under `.dvc` _should_ stick around even when you check out a branch outside of dvc control. But maybe `git ls-files -- .dvc`?

With very minimal testing, this post-checkout hook seems to fix at least my immediate problem:

```
#!/bin/sh
LS_FILES=`git ls-files .dvc`
[ "$LS_FILES" = "" ] || exec dvc checkout
```

The same thing happens on `git commit` in different branches, so the `pre-commit` hook should also be conditional on this check. Maybe the `pre-push` hook too?

@gthb dvc detects that this is a dvc repository by finding the `.dvc` directory. When switching branches, you have some gitignored (in the dvc branch) files there, so git leaves them under `.dvc`, and dvc still detects your non-dvc branch as a dvc branch. I don't see any good solution to handle that, except maybe modifying your git hooks to only run on the dvc branch. E.g. `.git/hooks/post-checkout`:

```
#!/bin/sh

if [ "$(git rev-parse --abbrev-ref HEAD)" == "my-dvc-branch" ]; then
    exec dvc checkout
fi
```

@gthb Oops, sorry, your later messages didn't show up for me for some reason. I like the `ls-files` trick a lot! Very neat! :slightly_smiling_face: Let's think about whether we could always include it as a header to our git hooks. I can only see a possible problem when you haven't git committed your .dvc files yet, but even that should not be a problem, since you usually git commit that stuff right after `dvc init`. What do you think?

Actually, `git ls-files` by default shows files that are in the index (added but not yet committed), and `dvc init` adds the new files to the index, so these hooks will work directly after `dvc init`.

```
$ dvc init
[...]
$ git ls-files .dvc
.dvc/.gitignore
.dvc/config
$ git reset
$ git ls-files .dvc
$
```

But yeah, instructions should probably point out that (with this `ls-files` change) the hooks installed by `dvc install` only act in a "dvc-enabled" branch, meaning one that has tracked files under `.dvc`, and initializing dvc on a branch doesn't affect other branches until the change gets merged into them. (So you can safely try it out in a branch before merging to master, for instance.)

@gthb Would you like to contribute a patch for this? :slightly_smiling_face: You'd simply need to add that to https://github.com/iterative/dvc/blob/0.50.1/dvc/scm/git/__init__.py#L230

@gthb Not sure if the message got lost or if I should take this as "no" :slightly_smiling_face:

Ah, whoops, sorry, you should take it as "away on vacation and not paying much attention" :) ... I can whip up a simple PR when I'm back home next weekend.

@gthb We would really appreciate that :slightly_smiling_face: Have a great vacation!
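A hedged sketch of what that patch could look like, writing the `ls-files` guard into each installed hook (the hook-writing helper is hypothetical; only the guard line itself comes from the thread):

```python
import os
import stat

HOOK_TEMPLATE = """#!/bin/sh
[ "$(git ls-files .dvc)" ] || exit 0
exec dvc {command}
"""


def install_hook(git_dir: str, name: str, command: str) -> None:
    # e.g. install_hook(".git", "post-checkout", "checkout")
    path = os.path.join(git_dir, "hooks", name)
    with open(path, "w") as fobj:
        fobj.write(HOOK_TEMPLATE.format(command=command))
    os.chmod(path, os.stat(path).st_mode | stat.S_IEXEC)
```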
---

As a first step we might just add a check that the dependency is not a dvc file and throw an error. Having it as a feature also sounds quite neat, so one could do `-d my.dvc`, which would mean "depend on all outputs of my.dvc". Not sure that feature is desirable, though, since it might be used by accident, causing some obscure bugs in the pipeline.

Discord context: https://discordapp.com/channels/485586884165107732/485596304961962003/605892459461083140

Ohh, @efiop, that sounds really neat! Never thought about it, and it makes sense since "the output of a stage becomes the input of the next one".

---

For the record: we can either `chdir()` into the temporary repo root, or make a global variable that we can adjust, which will make `PathInfo.str()` use it for `relpath` (kudos @Suor).

This is not only about `dvc get`. Imports are also affected, see #2691.

An addition about the global variable which might be used as a base for `PathInfo.__str__()`; it should be:
- really a thread-local, not a simple variable
- not accessed nor changed outside `dvc/path_info.py` directly
- managed with a context manager:

```python
with path_info_base(some_dir):
    # ... path_infos are stringified differently here
```

This will protect us from the usual global-var issues, making it a dynamic-scope variable, if you know what I mean ;)

From discord:

@kurianbenoy:
I am interested in fixing that issue, even if it takes more time to understand the codebase.

@efiop:
Glad to hear that! :slight_smile: Got it. Okay, let's think about some clever ways to fix that then. OK, so we are using the named cache for that. We could make it check whether we are inside of a repo or not, and if not, print a relpath computed relative to `repo.root_dir` instead of cwd. The piece of code responsible for that is https://github.com/iterative/dvc/blob/0.66.4/dvc/output/base.py#L410, specifically the `str(self)` that is passed as a name. So if we could do something like:

```python
if os.getcwd().startswith(self.stage.repo.root_dir):
    name = str(self)
else:
    name = relpath(self.path_info, self.stage.repo.root_dir)
```

and then use that `name` instead of `str(self)` in that line, it would automatically start working in an acceptable way.

@Suor: Some outs might be outside the repo dir but still "in repo"; referencing them simply by name would be confusing.

So the core culprit is stringifying `PathInfo` objects, which don't know about the repo and so can't show sane paths; stringifying output objects is, however, OK. So:
- it's OK to use `str(self)` where @efiop mentioned
- `OutputLocal.__str__()` should be fixed, though
- the code Ruslan showed above is almost right; the issue is that a dvc file might refer outside the repo dir with a relative path

I propose this logic for `OutputLocal.__str__()`:

```python
if not self.is_in_repo:
    return self.def_path
elif <we are inside this repo>:
    return relpath(self.path_info, curdir)
else:
    return relpath(self.path_info, self.repo.root_dir)
```

This means we show relative paths for the repo we are in, and rel-to-repo-root paths for all other repos. Remotes and absolute paths are always shown as-is.

On the containment check: this is not quite `.startswith()`, as that is wrong for paths:

```python
"a/b/c" isin "a/b" == True
"a/bc" isin "a/b" == False
```

This would fix the issue, and luckily we won't need chdirs nor a global var.

**P.S.** As a second cleanup/refactor stage, I would propose to completely stop using `PathInfo.__str__()`, since it fundamentally doesn't know how to stringify itself. Raise a `TypeError()` there and fix all the usages. This is definitely separate; don't think about it until the issue itself is resolved.
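A small stdlib illustration of that `isin` comparison, done component-wise rather than with the naive string check (the function name follows the pseudocode above):

```python
from pathlib import PurePosixPath


def isin(child: str, parent: str) -> bool:
    # compare path components, so "a/bc" is not "inside" "a/b"
    c, p = PurePosixPath(child).parts, PurePosixPath(parent).parts
    return len(c) > len(p) and c[: len(p)] == p


assert isin("a/b/c", "a/b")
assert not isin("a/bc", "a/b")
assert "a/bc".startswith("a/b")  # the naive string check gets this wrong
```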
---

I'm not sure what the correct behaviour would be here. Since the model is self-referencing, surely the schema is infinitely recursive?

@tiangolo do you have an idea about what to do here? Maybe just a more constructive error.

Yes, it should be able to create the JSON Schema, assign an ID to it, and then reference it with `$ref`. Here's the relevant section in the spec: http://json-schema.org/latest/json-schema-core.html#rfc.section.8.3

I'll try to fix it as soon as I get a chance.

Awesome, thank you. I think you're the best person to do it, since you understand schema best.

---

Hi @PeterFogh!

Thanks for reporting it! That part of the Python API is not officially released yet, so there definitely will be some bugs 🙁 Are you using it in one of your scripts? Could you describe the scenario? E.g. how big is the graph, and what `networkx` version are you using?

You are welcome. Yeah, I know; I'm pushing DVC to the experimental level 😜. The scenario is to save the DVC pipeline graph as a PNG image file, which we store in MLflow as an artefact.

The pipeline looks like this via `dvc pipeline show --ascii` (alignment mangled in transit):

```
+-------------------+ +---------------------+ +------------------------------+ +---------------------------+
| sql_extration.dvc | | dawiki_download.dvc | | landbrugsinfo_extraction.dvc | **********| lkontoplan_extraction.dvc |
+-------------------+ +---------------------+*** +------------------------------+* ****+---------------------------+
 * ******************** * ******** *
 * ********************* ****** * ******** *
 * ********************* *** * **** *
 +---------------+********** +-----------------------+ *
 | splitting.dvc |******* | pretrain_word2vec.dvc | *
 +---------------+ *************** +-----------------------+ *
 * **************** * *
 * *************** * *
 * ******** * *
 * +--------------------+ *
 * *******| train_word2vec.dvc | *
 * *************** +--------------------+ *
 * ************** * *
 * *************** * *
 * ******** * *
+-------------------+ +--------------+ *** *
| featurization.dvc |*** ****| training.dvc |** **** *
+-------------------+ ********* ******** +--------------+ ****** ***** *
 * ********* * ******** *
 * ******** ********* * **** ****** *
 * ***** ***** * *** ****** *
 +----------------+ +-----------------------+ **** *****
 | prediction.dvc |****** | model_to_azure_sa.dvc | *** *********
 +----------------+ *********** +-----------------------+ ** *********
 *********** *** *********
 *********** *** *********
 *********** *******
 ****+---------+
 | Dvcfile |
 +---------+
```

The networkx version is:

```
conda list -n py36_v11 | grep networkx
> networkx 2.3 py_0
```

@PeterFogh if you don't need to do anything with the Repo object after that, or if you can initialize it again, try running `reverse(copy=False)`.

I wonder if it's a networkx bug? Or whether it's something related to us due to deepcopy calls.

OK, I was able to reproduce it this way:

```
from dvc.path_info import URLInfo
u = URLInfo("ssh://user@test.com:/test1/test2/test3")
copy.deepcopy(u)
```

so it's something related to our URLInfo implementation.
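One common fix for deepcopy-hostile value objects like this is a custom `__deepcopy__` that round-trips through the constructor; a hypothetical standalone sketch (not DVC's actual change, and the class here is a stand-in for URLInfo):

```python
import copy


class URLInfo:
    def __init__(self, url: str):
        self.url = url

    def __deepcopy__(self, memo):
        # rebuild from the constructor instead of recursing into
        # cached/derived attributes that may not copy cleanly
        return type(self)(self.url)


u = URLInfo("ssh://user@test.com:/test1/test2/test3")
print(copy.deepcopy(u).url)
```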
---

Thanks @markus1978 for reporting. Would you like to open a PR for fixing it?

@hramezani
> Would you like to open a PR for fixing?

Sure. I guess I should base off of and target `1.10.X-fixes`?

Yes.

---

@dmpetrov actually we do support yaml, though my tests are very simple and just use a yaml-encoded list, e.g.:

```
- y: 2
- y: 3
```

The original issue (mentioned by @jorgeorpinel) is about finding the metric inside a dictionary. Yaml parsing is already present, and the mechanism for extracting an array of values is used in the `JSONPlotData` class, so this issue should simply be about adding a `_find_data` method to `YAMLPlotData`, plus associated tests.

@pared so, is this a bug? Let's label it appropriately and discuss with @efiop whether we can fix it this sprint, before the release.

> original issue (mentioned by @jorgeorpinel) is about finding metric inside dictionary

If you just convert the JSON example from https://dvc.org/doc/command-reference/plots/show#example-hierarchical-data-json to YAML and try to use it, it gives the same error.

```yaml
---
train:
- accuracy: 0.96658
  loss: 0.10757
- accuracy: 0.97641
  loss: 0.07324
- accuracy: 0.87707
  loss: 0.08136
- accuracy: 0.87402
  loss: 0.09026
- accuracy: 0.8795
  loss: 0.0764
- accuracy: 0.88038
  loss: 0.07608
- accuracy: 0.89872
  loss: 0.08455
```

Save as `train.yaml.default`:

```console
λ dvc run -n yamlplot -d train.yaml.default --plots train.yaml \
    cp train.yaml.default train.yaml
...
λ dvc plots show train.yaml
ERROR: unexpected error - 'str' object has no attribute 'keys'
```

UPDATE: If you remove the first 2 lines from the YAML file

```yaml
---
train:
```

it does generate the plot correctly. Not sure whether this is how it should work; please let me know, so we can document YAML support and write some examples.
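A guess at what that `_find_data` could look like, mirroring the JSON logic described above (the method name comes from the thread; the body is hypothetical): accept a top-level list, or descend one level into a dict to find the first list of records.

```python
import yaml


def find_data(content: str):
    data = yaml.safe_load(content)
    if isinstance(data, list):
        return data
    if isinstance(data, dict):
        for value in data.values():
            if isinstance(value, list):
                return value
    raise ValueError("no list of records found")


print(find_data("train:\n- accuracy: 0.96\n  loss: 0.1\n"))
# [{'accuracy': 0.96, 'loss': 0.1}]
```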
(hash_ for hash_, is_dir in self.index.items() if is_dir)\r\n File \"/opt/anaconda3/envs/valimo-local/lib/python3.7/site-packages/dvc/data/db/index.py\", line 119, in \r\n yield from (hash_ for hash_, is_dir in self.index.items() if is_dir)\r\n File \"/opt/anaconda3/envs/valimo-local/lib/python3.7/_collections_abc.py\", line 744, in __iter__\r\n yield (key, self._mapping[key])\r\n File \"/opt/anaconda3/envs/valimo-local/lib/python3.7/site-packages/diskcache/persistent.py\", line 730, in __getitem__\r\n return self._cache[key]\r\n File \"/opt/anaconda3/envs/valimo-local/lib/python3.7/site-packages/diskcache/core.py\", line 1232, in __getitem__\r\n value = self.get(key, default=ENOVAL, retry=True)\r\n File \"/opt/anaconda3/envs/valimo-local/lib/python3.7/site-packages/diskcache/core.py\", line 1173, in get\r\n value = self._disk.fetch(mode, filename, db_value, read)\r\n File \"/opt/anaconda3/envs/valimo-local/lib/python3.7/site-packages/diskcache/core.py\", line 285, in fetch\r\n return pickle.load(io.BytesIO(value))\r\nValueError: unsupported pickle protocol: 5\r\n------------------------------------------------------------\r\n2022-05-11 13:48:51,316 DEBUG: Removing '/Users/***/Documents/code/work/***/.mJCQTQ7VWcdKbQvxRdcp2y.tmp'\r\n2022-05-11 13:48:51,318 DEBUG: Removing '/Users/***/Documents/code/work/***/.mJCQTQ7VWcdKbQvxRdcp2y.tmp'\r\n2022-05-11 13:48:51,318 DEBUG: Removing '/Users/***/Documents/code/work/***/.mJCQTQ7VWcdKbQvxRdcp2y.tmp'\r\n2022-05-11 13:48:51,318 DEBUG: Removing '/Users/***/Documents/code/work/***/.dvc/cache/.68G3bcZEzc4zjdHpDsNqYj.tmp'\r\n2022-05-11 13:48:51,322 DEBUG: Version info for developers:\r\nDVC version: 2.10.2 (conda)\r\n---------------------------------\r\nPlatform: Python 3.7.11 on Darwin-21.2.0-x86_64-i386-64bit\r\nSupports:\r\n gs (gcsfs = 2022.1.0),\r\n webhdfs (fsspec = 2022.1.0),\r\n http (aiohttp = 3.8.1, aiohttp-retry = 2.4.6),\r\n https (aiohttp = 3.8.1, aiohttp-retry = 2.4.6)\r\nCache types: reflink, hardlink, symlink\r\nCache directory: apfs on /dev/disk1s1s1\r\nCaches: local\r\nRemotes: gs\r\nWorkspace directory: apfs on /dev/disk1s1s1\r\nRepo: dvc, git\r\n\r\nHaving any troubles? Hit us up at https://dvc.org/support, we are always happy to help!\r\n2022-05-11 13:48:51,326 DEBUG: Analytics is enabled.\r\n2022-05-11 13:48:51,457 DEBUG: Trying to spawn '['daemon', '-q', 'analytics', '/var/folders/4p/0n_mm8js23x1vhydv_3yq8b40000gq/T/tmp_4ld4dv3']'\r\n2022-05-11 13:48:51,459 DEBUG: Spawned '['daemon', '-q', 'analytics', '/var/folders/4p/0n_mm8js23x1vhydv_3yq8b40000gq/T/tmp_4ld4dv3']'\r\n```\nThis seems like https://dvc.org/doc/user-guide/troubleshooting#pickle but we are not catching the exception in `pull` in order to redirect users to the link:\r\n\r\nhttps://github.com/iterative/dvc/blob/47d10c4a8b9bdc18dec17a9380f0593e43383c4c/dvc/utils/decorators.py#L13-L30\nTrue, I can confirm that the following fixed the issue:\r\n\r\n```\r\nrm -r .dvc/tmp/index .dvc/tmp/md5s .dvc/tmp/links\r\n```\r\n\r\nThis is probably due to the fact that we have python 3.8 running in our pre-commit hooks environment.\n@jnissin this behavior (but not the incorrect error message) is expected if you are mixing python3.8 and 3.7 usage within the same repo. See the docs linked by @daavoo, you will need to remove `.dvc/tmp/index` before running in python 3.7 in this case.\nAlright, I'll try to switch our pre-commit environment to use python 3.7 as well. Thanks a lot!\n@jnissin Btw, did you migrate from an older dvc version recently? 
---

Thanks for reporting! I'm so glad to have people messing around with `master`! It's always a great way to catch things beforehand 😄

---

Thanks for reporting @Casyfill! Indeed, we need to override it. We'll take a look.

You probably can instead just check for the xpath in the current Dvcfile and override it with the explicit path, if needed?

@Casyfill Yes, the explicitly specified xpath should override the one specified in a dvc file.

---

> should not contain the extra .dvc/tmp/repro.dat, which is just used for dvc exp run to pass repro arguments.

We should probably not generate that file (add an option to skip?) when calling `_stash_exp` from `experiments.save`.

---

Related: #5371

---

Would this be enough to simplify it?

```
$ dvc plots templates --help
usage: dvc plots templates [-h] [-q | -v] [-l] [-o ] [
```