{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) 
) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const 
text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); 
replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\n\"\"\"\n\nTPL = (' \\n'\n ' {0}\\n'\n ' {1}\\n'\n ' ')\n\n\ndef format_lines(col1, col2):\n col1_format = getattr(col1.info, 'default_format', lambda x: x)\n col2_format = getattr(col2.info, 'default_format', lambda x: x)\n return '\\n'.join(TPL.format(col1_format(v1), col2_format(v2))\n for v1, v2 in zip(col1, col2))\n\n\ndef test_write_jsviewer_default(tmpdir):\n t = Table()\n t['a'] = [1, 2, 3, 4, 5]\n t['b'] = ['a', 'b', 'c', 'd', 'e']\n t['a'].unit = 'm'\n\n tmpfile = tmpdir.join('test.html').strpath\n\n t.write(tmpfile, format='jsviewer')\n ref = REFERENCE % dict(\n lines=format_lines(t['a'], t['b']),\n table_class='display compact',\n table_id=f'table{id(t)}',\n length='50',\n 
display_length='10, 25, 50, 100, 500, 1000',\n datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css',\n datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',\n jquery_url='https://code.jquery.com/' + JQUERY_MIN_JS\n )\n with open(tmpfile) as f:\n assert f.read().strip() == ref.strip()\n\n\ndef test_write_jsviewer_overwrite(tmpdir):\n t = Table()\n t['a'] = [1, 2, 3, 4, 5]\n t['b'] = ['a', 'b', 'c', 'd', 'e']\n t['a'].unit = 'm'\n tmpfile = tmpdir.join('test.html').strpath\n\n # normal write\n t.write(tmpfile, format='jsviewer')\n # errors on overwrite\n with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):\n t.write(tmpfile, format='jsviewer')\n # unless specified\n t.write(tmpfile, format='jsviewer', overwrite=True)\n\n\n@pytest.mark.parametrize('mixin', [\n Time(['J2000', 'J2001']),\n Time([50000., 50001.0001], format='mjd'),\n SkyCoord(ra=[100., 110.], dec=[-10., 10.], unit='deg')])\ndef test_write_jsviewer_mixin(tmpdir, mixin):\n t = Table()\n t['a'] = [1, 2]\n t['b'] = mixin\n t['a'].unit = 'm'\n\n tmpfile = tmpdir.join('test.html').strpath\n\n t.write(tmpfile, format='jsviewer')\n ref = REFERENCE % dict(\n lines=format_lines(t['a'], t['b']),\n table_class='display compact',\n table_id=f'table{id(t)}',\n length='50',\n display_length='10, 25, 50, 100, 500, 1000',\n datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css',\n datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',\n jquery_url='https://code.jquery.com/' + JQUERY_MIN_JS\n )\n with open(tmpfile) as f:\n assert f.read().strip() == ref.strip()\n\n\n@pytest.mark.skipif('not HAS_BLEACH')\ndef test_write_jsviewer_options(tmpdir):\n t = Table()\n t['a'] = [1, 2, 3, 4, 5]\n t['b'] = ['a', 'b', 'c', 'd', 'e']\n t['a'].unit = 'm'\n\n tmpfile = tmpdir.join('test.html').strpath\n t.write(tmpfile, format='jsviewer', table_id='test', max_lines=3,\n jskwargs={'display_length': 5}, 
table_class='display hover',\n htmldict=dict(raw_html_cols='b'))\n\n ref = REFERENCE % dict(\n lines=format_lines(t['a'][:3], t['b'][:3]),\n table_class='display hover',\n table_id='test',\n length='5',\n display_length='5, 10, 25, 50, 100, 500, 1000',\n datatables_css_url='https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css',\n datatables_js_url='https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',\n jquery_url='https://code.jquery.com/' + JQUERY_MIN_JS\n )\n with open(tmpfile) as f:\n assert f.read().strip() == ref.strip()\n\n\ndef test_write_jsviewer_local(tmpdir):\n t = Table()\n t['a'] = [1, 2, 3, 4, 5]\n t['b'] = ['a', 'b', 'c', 'd', 'e']\n t['a'].unit = 'm'\n\n tmpfile = tmpdir.join('test.html').strpath\n\n t.write(tmpfile, format='jsviewer', table_id='test',\n jskwargs={'use_local_files': True})\n ref = REFERENCE % dict(\n lines=format_lines(t['a'], t['b']),\n table_class='display compact',\n table_id='test',\n length='50',\n display_length='10, 25, 50, 100, 500, 1000',\n datatables_css_url='file://' + join(EXTERN_DIR, 'css', 'jquery.dataTables.css'),\n datatables_js_url='file://' + join(EXTERN_DIR, 'js', 'jquery.dataTables.min.js'),\n jquery_url='file://' + join(EXTERN_DIR, 'js', JQUERY_MIN_JS)\n )\n with open(tmpfile) as f:\n assert f.read().strip() == ref.strip()\n\n\n@pytest.mark.skipif('not HAS_IPYTHON')\ndef test_show_in_notebook():\n t = Table()\n t['a'] = [1, 2, 3, 4, 5]\n t['b'] = ['b', 'c', 'a', 'd', 'e']\n\n htmlstr_windx = t.show_in_notebook().data # should default to 'idx'\n htmlstr_windx_named = t.show_in_notebook(show_row_index='realidx').data\n htmlstr_woindx = t.show_in_notebook(show_row_index=False).data\n\n assert (textwrap.dedent(\"\"\"\n idxab\n 01b\n 12c\n 23a\n 34d\n 45e\n \"\"\").strip() in htmlstr_windx)\n\n assert 'realidxab' in htmlstr_windx_named\n\n assert 'ab' in 
htmlstr_woindx\n"}}},{"rowIdx":1348,"cells":{"hash":{"kind":"string","value":"24c58c555eafd5e877d150a7e2c3d9bd7e1a571f76941855bdd15f7952287594"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom collections import OrderedDict, UserDict\nfrom collections.abc import Mapping\n\nimport pytest\nimport numpy as np\n\nfrom astropy.table import Column, TableColumns, Table, MaskedColumn\nimport astropy.units as u\n\n\nclass DictLike(Mapping):\n \"\"\"A minimal mapping-like object that does not subclass dict.\n\n This is used to test code that expects dict-like but without actually\n inheriting from dict.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self._data = dict(*args, **kwargs)\n\n def __getitem__(self, item):\n return self._data[item]\n\n def __setitem__(self, item, value):\n self._data[item] = value\n\n def __iter__(self):\n return iter(self._data)\n\n def __len__(self):\n return len(self._data)\n\n\nclass TestTableColumnsInit():\n def test_init(self):\n \"\"\"Test initialisation with lists, tuples, dicts of arrays\n rather than Columns [regression test for #2647]\"\"\"\n x1 = np.arange(10.)\n x2 = np.arange(5.)\n x3 = np.arange(7.)\n col_list = [('x1', x1), ('x2', x2), ('x3', x3)]\n tc_list = TableColumns(col_list)\n for col in col_list:\n assert col[0] in tc_list\n assert tc_list[col[0]] is col[1]\n\n col_tuple = (('x1', x1), ('x2', x2), ('x3', x3))\n tc_tuple = TableColumns(col_tuple)\n for col in col_tuple:\n assert col[0] in tc_tuple\n assert tc_tuple[col[0]] is col[1]\n\n col_dict = dict([('x1', x1), ('x2', x2), ('x3', x3)])\n tc_dict = TableColumns(col_dict)\n for col in tc_dict.keys():\n assert col in tc_dict\n assert tc_dict[col] is col_dict[col]\n\n columns = [Column(col[1], name=col[0]) for col in col_list]\n tc = TableColumns(columns)\n for col in columns:\n assert col.name in tc\n assert tc[col.name] is col\n\n\n# pytest.mark.usefixtures('table_type')\nclass BaseInitFrom():\n def _setup(self, 
table_type):\n pass\n\n def test_basic_init(self, table_type):\n self._setup(table_type)\n t = table_type(self.data, names=('a', 'b', 'c'))\n assert t.colnames == ['a', 'b', 'c']\n assert np.all(t['a'] == np.array([1, 3]))\n assert np.all(t['b'] == np.array([2, 4]))\n assert np.all(t['c'] == np.array([3, 5]))\n assert all(t[name].name == name for name in t.colnames)\n\n def test_set_dtype(self, table_type):\n self._setup(table_type)\n t = table_type(self.data, names=('a', 'b', 'c'), dtype=('i4', 'f4', 'f8'))\n assert t.colnames == ['a', 'b', 'c']\n assert np.all(t['a'] == np.array([1, 3], dtype='i4'))\n assert np.all(t['b'] == np.array([2, 4], dtype='f4'))\n assert np.all(t['c'] == np.array([3, 5], dtype='f8'))\n assert t['a'].dtype.type == np.int32\n assert t['b'].dtype.type == np.float32\n assert t['c'].dtype.type == np.float64\n assert all(t[name].name == name for name in t.colnames)\n\n def test_names_dtype_mismatch(self, table_type):\n self._setup(table_type)\n with pytest.raises(ValueError):\n table_type(self.data, names=('a',), dtype=('i4', 'f4', 'i4'))\n\n def test_names_cols_mismatch(self, table_type):\n self._setup(table_type)\n with pytest.raises(ValueError):\n table_type(self.data, names=('a',), dtype=('i4'))\n\n\n@pytest.mark.usefixtures('table_type')\nclass BaseInitFromListLike(BaseInitFrom):\n\n def test_names_cols_mismatch(self, table_type):\n self._setup(table_type)\n with pytest.raises(ValueError):\n table_type(self.data, names=['a'], dtype=[int])\n\n def test_names_copy_false(self, table_type):\n self._setup(table_type)\n with pytest.raises(ValueError):\n table_type(self.data, names=['a'], dtype=[int], copy=False)\n\n\n@pytest.mark.usefixtures('table_type')\nclass BaseInitFromDictLike(BaseInitFrom):\n pass\n\n\n@pytest.mark.usefixtures('table_type')\nclass TestInitFromNdarrayHomo(BaseInitFromListLike):\n\n def setup_method(self, method):\n self.data = np.array([(1, 2, 3),\n (3, 4, 5)],\n dtype='i4')\n\n def test_default_names(self, table_type):\n 
self._setup(table_type)\n t = table_type(self.data)\n assert t.colnames == ['col0', 'col1', 'col2']\n\n def test_ndarray_ref(self, table_type):\n \"\"\"Init with ndarray and copy=False and show that this is a reference\n to input ndarray\"\"\"\n self._setup(table_type)\n t = table_type(self.data, copy=False)\n t['col1'][1] = 0\n assert t.as_array()['col1'][1] == 0\n assert t['col1'][1] == 0\n assert self.data[1][1] == 0\n\n def test_partial_names_dtype(self, table_type):\n self._setup(table_type)\n t = table_type(self.data, names=['a', None, 'c'], dtype=[None, None, 'f8'])\n assert t.colnames == ['a', 'col1', 'c']\n assert t['a'].dtype.type == np.int32\n assert t['col1'].dtype.type == np.int32\n assert t['c'].dtype.type == np.float64\n assert all(t[name].name == name for name in t.colnames)\n\n def test_partial_names_ref(self, table_type):\n self._setup(table_type)\n t = table_type(self.data, names=['a', None, 'c'])\n assert t.colnames == ['a', 'col1', 'c']\n assert t['a'].dtype.type == np.int32\n assert t['col1'].dtype.type == np.int32\n assert t['c'].dtype.type == np.int32\n assert all(t[name].name == name for name in t.colnames)\n\n\n@pytest.mark.usefixtures('table_type')\nclass TestInitFromListOfLists(BaseInitFromListLike):\n\n def setup_method(self, table_type):\n self._setup(table_type)\n self.data = [(np.int32(1), np.int32(3)),\n Column(name='col1', data=[2, 4], dtype=np.int32),\n np.array([3, 5], dtype=np.int32)]\n\n def test_default_names(self, table_type):\n self._setup(table_type)\n t = table_type(self.data)\n assert t.colnames == ['col0', 'col1', 'col2']\n assert all(t[name].name == name for name in t.colnames)\n\n def test_partial_names_dtype(self, table_type):\n self._setup(table_type)\n t = table_type(self.data, names=['b', None, 'c'],\n dtype=['f4', None, 'f8'])\n assert t.colnames == ['b', 'col1', 'c']\n assert t['b'].dtype.type == np.float32\n assert t['col1'].dtype.type == np.int32\n assert t['c'].dtype.type == np.float64\n assert 
all(t[name].name == name for name in t.colnames)\n\n def test_bad_data(self, table_type):\n self._setup(table_type)\n with pytest.raises(ValueError):\n table_type([[1, 2],\n [3, 4, 5]])\n\n\n@pytest.mark.usefixtures('table_type')\nclass TestInitFromListOfDicts(BaseInitFromListLike):\n\n def _setup(self, table_type):\n self.data = [{'a': 1, 'b': 2, 'c': 3},\n {'a': 3, 'b': 4, 'c': 5}]\n self.data_ragged = [{'a': 1, 'b': 2},\n {'a': 2, 'c': 4}]\n\n def test_names(self, table_type):\n self._setup(table_type)\n t = table_type(self.data)\n assert all(colname in {'a', 'b', 'c'} for colname in t.colnames)\n\n def test_names_ordered(self, table_type):\n self._setup(table_type)\n t = table_type(self.data, names=('c', 'b', 'a'))\n assert t.colnames == ['c', 'b', 'a']\n\n def test_missing_data_init_from_dict(self, table_type):\n self._setup(table_type)\n dat = self.data_ragged\n for rows in [False, True]:\n t = table_type(rows=dat) if rows else table_type(dat)\n\n assert np.all(t['a'] == [1, 2])\n assert np.all(t['b'].mask == [False, True])\n assert np.all(t['b'].data == [2, 2])\n assert np.all(t['c'].mask == [True, False])\n assert np.all(t['c'].data == [4, 4])\n\n assert type(t['a']) is (MaskedColumn if t.masked else Column)\n assert type(t['b']) is MaskedColumn\n assert type(t['c']) is MaskedColumn\n\n\nclass TestInitFromListOfMapping(TestInitFromListOfDicts):\n \"\"\"Test that init from a Mapping that is not a dict subclass works\"\"\"\n def _setup(self, table_type):\n self.data = [DictLike(a=1, b=2, c=3),\n DictLike(a=3, b=4, c=5)]\n self.data_ragged = [DictLike(a=1, b=2),\n DictLike(a=2, c=4)]\n # Make sure data rows are not a dict subclass\n assert not isinstance(self.data[0], dict)\n\n\n@pytest.mark.usefixtures('table_type')\nclass TestInitFromColsList(BaseInitFromListLike):\n\n def _setup(self, table_type):\n self.data = [Column([1, 3], name='x', dtype=np.int32),\n np.array([2, 4], dtype=np.int32),\n np.array([3, 5], dtype='i8')]\n\n def test_default_names(self, 
table_type):\n self._setup(table_type)\n t = table_type(self.data)\n assert t.colnames == ['x', 'col1', 'col2']\n assert all(t[name].name == name for name in t.colnames)\n\n def test_partial_names_dtype(self, table_type):\n self._setup(table_type)\n t = table_type(self.data, names=['b', None, 'c'], dtype=['f4', None, 'f8'])\n assert t.colnames == ['b', 'col1', 'c']\n assert t['b'].dtype.type == np.float32\n assert t['col1'].dtype.type == np.int32\n assert t['c'].dtype.type == np.float64\n assert all(t[name].name == name for name in t.colnames)\n\n def test_ref(self, table_type):\n \"\"\"Test that initializing from a list of columns can be done by reference\"\"\"\n self._setup(table_type)\n t = table_type(self.data, copy=False)\n t['x'][0] = 100\n assert self.data[0][0] == 100\n\n\n@pytest.mark.usefixtures('table_type')\nclass TestInitFromNdarrayStruct(BaseInitFromDictLike):\n\n def _setup(self, table_type):\n self.data = np.array([(1, 2, 3),\n (3, 4, 5)],\n dtype=[('x', 'i8'), ('y', 'i4'), ('z', 'i8')])\n\n def test_ndarray_ref(self, table_type):\n \"\"\"Init with ndarray and copy=False and show that table uses reference\n to input ndarray\"\"\"\n self._setup(table_type)\n t = table_type(self.data, copy=False)\n\n t['x'][1] = 0 # Column-wise assignment\n t[0]['y'] = 0 # Row-wise assignment\n assert self.data['x'][1] == 0\n assert self.data['y'][0] == 0\n assert np.all(np.array(t) == self.data)\n assert all(t[name].name == name for name in t.colnames)\n\n def test_partial_names_dtype(self, table_type):\n self._setup(table_type)\n t = table_type(self.data, names=['e', None, 'd'], dtype=['f4', None, 'f8'])\n assert t.colnames == ['e', 'y', 'd']\n assert t['e'].dtype.type == np.float32\n assert t['y'].dtype.type == np.int32\n assert t['d'].dtype.type == np.float64\n assert all(t[name].name == name for name in t.colnames)\n\n def test_partial_names_ref(self, table_type):\n self._setup(table_type)\n t = table_type(self.data, names=['e', None, 'd'], copy=False)\n assert 
t.colnames == ['e', 'y', 'd']\n assert t['e'].dtype.type == np.int64\n assert t['y'].dtype.type == np.int32\n assert t['d'].dtype.type == np.int64\n assert all(t[name].name == name for name in t.colnames)\n\n\n@pytest.mark.usefixtures('table_type')\nclass TestInitFromDict(BaseInitFromDictLike):\n\n def _setup(self, table_type):\n self.data = dict([('a', Column([1, 3], name='x')),\n ('b', [2, 4]),\n ('c', np.array([3, 5], dtype='i8'))])\n\n\n@pytest.mark.usefixtures('table_type')\nclass TestInitFromMapping(BaseInitFromDictLike):\n\n def _setup(self, table_type):\n self.data = UserDict([('a', Column([1, 3], name='x')),\n ('b', [2, 4]),\n ('c', np.array([3, 5], dtype='i8'))])\n assert isinstance(self.data, Mapping)\n assert not isinstance(self.data, dict)\n\n\n@pytest.mark.usefixtures('table_type')\nclass TestInitFromOrderedDict(BaseInitFromDictLike):\n\n def _setup(self, table_type):\n self.data = OrderedDict([('a', Column(name='x', data=[1, 3])),\n ('b', [2, 4]),\n ('c', np.array([3, 5], dtype='i8'))])\n\n def test_col_order(self, table_type):\n self._setup(table_type)\n t = table_type(self.data)\n assert t.colnames == ['a', 'b', 'c']\n\n\n@pytest.mark.usefixtures('table_type')\nclass TestInitFromRow(BaseInitFromDictLike):\n\n def _setup(self, table_type):\n arr = np.array([(1, 2, 3),\n (3, 4, 5)],\n dtype=[('x', 'i8'), ('y', 'i8'), ('z', 'f8')])\n self.data = table_type(arr, meta={'comments': ['comment1', 'comment2']})\n\n def test_init_from_row(self, table_type):\n self._setup(table_type)\n t = table_type(self.data[0])\n\n # Values and meta match original\n assert t.meta['comments'][0] == 'comment1'\n for name in t.colnames:\n assert np.all(t[name] == self.data[name][0:1])\n assert all(t[name].name == name for name in t.colnames)\n\n # Change value in new instance and check that original is the same\n t['x'][0] = 8\n t.meta['comments'][1] = 'new comment2'\n assert np.all(t['x'] == np.array([8]))\n assert np.all(self.data['x'] == np.array([1, 3]))\n assert 
self.data.meta['comments'][1] == 'comment2'\n\n\n@pytest.mark.usefixtures('table_type')\nclass TestInitFromTable(BaseInitFromDictLike):\n\n def _setup(self, table_type):\n arr = np.array([(1, 2, 3),\n (3, 4, 5)],\n dtype=[('x', 'i8'), ('y', 'i8'), ('z', 'f8')])\n self.data = table_type(arr, meta={'comments': ['comment1', 'comment2']})\n\n def test_data_meta_copy(self, table_type):\n self._setup(table_type)\n t = table_type(self.data)\n assert t.meta['comments'][0] == 'comment1'\n t['x'][1] = 8\n t.meta['comments'][1] = 'new comment2'\n assert self.data.meta['comments'][1] == 'comment2'\n assert np.all(t['x'] == np.array([1, 8]))\n assert np.all(self.data['x'] == np.array([1, 3]))\n assert t['z'].name == 'z'\n assert all(t[name].name == name for name in t.colnames)\n\n def test_table_ref(self, table_type):\n self._setup(table_type)\n t = table_type(self.data, copy=False)\n t['x'][1] = 0\n assert t['x'][1] == 0\n assert self.data['x'][1] == 0\n assert np.all(t.as_array() == self.data.as_array())\n assert all(t[name].name == name for name in t.colnames)\n\n def test_partial_names_dtype(self, table_type):\n self._setup(table_type)\n t = table_type(self.data, names=['e', None, 'd'], dtype=['f4', None, 'i8'])\n assert t.colnames == ['e', 'y', 'd']\n assert t['e'].dtype.type == np.float32\n assert t['y'].dtype.type == np.int64\n assert t['d'].dtype.type == np.int64\n assert all(t[name].name == name for name in t.colnames)\n\n def test_partial_names_ref(self, table_type):\n self._setup(table_type)\n t = table_type(self.data, names=['e', None, 'd'], copy=False)\n assert t.colnames == ['e', 'y', 'd']\n assert t['e'].dtype.type == np.int64\n assert t['y'].dtype.type == np.int64\n assert t['d'].dtype.type == np.float64\n assert all(t[name].name == name for name in t.colnames)\n\n def test_init_from_columns(self, table_type):\n self._setup(table_type)\n t = table_type(self.data)\n t2 = table_type(t.columns['z', 'x', 'y'])\n assert t2.colnames == ['z', 'x', 'y']\n assert 
t2.dtype.names == ('z', 'x', 'y')\n\n def test_init_from_columns_slice(self, table_type):\n self._setup(table_type)\n t = table_type(self.data)\n t2 = table_type(t.columns[0:2])\n assert t2.colnames == ['x', 'y']\n assert t2.dtype.names == ('x', 'y')\n\n def test_init_from_columns_mix(self, table_type):\n self._setup(table_type)\n t = table_type(self.data)\n t2 = table_type([t.columns[0], t.columns['z']])\n assert t2.colnames == ['x', 'z']\n assert t2.dtype.names == ('x', 'z')\n\n\n@pytest.mark.usefixtures('table_type')\nclass TestInitFromNone():\n # Note table_table.TestEmptyData tests initializing a completely empty\n # table and adding data.\n\n def test_data_none_with_cols(self, table_type):\n \"\"\"\n Test different ways of initing an empty table\n \"\"\"\n np_t = np.empty(0, dtype=[('a', 'f4', (2,)),\n ('b', 'i4')])\n for kwargs in ({'names': ('a', 'b')},\n {'names': ('a', 'b'), 'dtype': (('f4', (2,)), 'i4')},\n {'dtype': [('a', 'f4', (2,)), ('b', 'i4')]},\n {'dtype': np_t.dtype}):\n t = table_type(**kwargs)\n assert t.colnames == ['a', 'b']\n assert len(t['a']) == 0\n assert len(t['b']) == 0\n if 'dtype' in kwargs:\n assert t['a'].dtype.type == np.float32\n assert t['b'].dtype.type == np.int32\n assert t['a'].shape[1:] == (2,)\n\n\n@pytest.mark.usefixtures('table_types')\nclass TestInitFromRows():\n\n def test_init_with_rows(self, table_type):\n for rows in ([[1, 'a'], [2, 'b']],\n [(1, 'a'), (2, 'b')],\n ((1, 'a'), (2, 'b'))):\n t = table_type(rows=rows, names=('a', 'b'))\n assert np.all(t['a'] == [1, 2])\n assert np.all(t['b'] == ['a', 'b'])\n assert t.colnames == ['a', 'b']\n assert t['a'].dtype.kind == 'i'\n assert t['b'].dtype.kind in ('S', 'U')\n # Regression test for\n # https://github.com/astropy/astropy/issues/3052\n assert t['b'].dtype.str.endswith('1')\n\n rows = np.arange(6).reshape(2, 3)\n t = table_type(rows=rows, names=('a', 'b', 'c'), dtype=['f8', 'f4', 'i8'])\n assert np.all(t['a'] == [0, 3])\n assert np.all(t['b'] == [1, 4])\n assert 
np.all(t['c'] == [2, 5])\n assert t.colnames == ['a', 'b', 'c']\n assert t['a'].dtype.str.endswith('f8')\n assert t['b'].dtype.str.endswith('f4')\n assert t['c'].dtype.str.endswith('i8')\n\n def test_init_with_rows_and_data(self, table_type):\n with pytest.raises(ValueError) as err:\n table_type(data=[[1]], rows=[[1]])\n assert \"Cannot supply both `data` and `rows` values\" in str(err.value)\n\n\n@pytest.mark.parametrize('has_data', [True, False])\ndef test_init_table_with_names_and_structured_dtype(has_data):\n \"\"\"Test fix for #10393\"\"\"\n arr = np.ones(2, dtype=np.dtype([('a', 'i4'), ('b', 'f4')]))\n data_args = [arr] if has_data else []\n t = Table(*data_args, names=['x', 'y'], dtype=arr.dtype)\n assert t.colnames == ['x', 'y']\n assert str(t['x'].dtype) == 'int32'\n assert str(t['y'].dtype) == 'float32'\n assert len(t) == (2 if has_data else 0)\n\n\n@pytest.mark.usefixtures('table_type')\ndef test_init_and_ref_from_multidim_ndarray(table_type):\n \"\"\"\n Test that initializing from an ndarray structured array with\n a multi-dim column works for both copy=False and True and that\n the referencing is as expected.\n \"\"\"\n for copy in (False, True):\n nd = np.array([(1, [10, 20]),\n (3, [30, 40])],\n dtype=[('a', 'i8'), ('b', 'i8', (2,))])\n t = table_type(nd, copy=copy)\n assert t.colnames == ['a', 'b']\n assert t['a'].shape == (2,)\n assert t['b'].shape == (2, 2)\n t['a'][0] = -200\n t['b'][1][1] = -100\n if copy:\n assert nd['a'][0] == 1\n assert nd['b'][1][1] == 40\n else:\n assert nd['a'][0] == -200\n assert nd['b'][1][1] == -100\n\n\n@pytest.mark.usefixtures('table_type')\n@pytest.mark.parametrize('copy', [False, True])\ndef test_init_and_ref_from_dict(table_type, copy):\n \"\"\"\n Test that initializing from a dict works for both copy=False and True and that\n the referencing is as expected.\n \"\"\"\n x1 = np.arange(10.)\n x2 = np.zeros(10)\n col_dict = dict([('x1', x1), ('x2', x2)])\n t = table_type(col_dict, copy=copy)\n assert set(t.colnames) 
== {'x1', 'x2'}\n assert t['x1'].shape == (10,)\n assert t['x2'].shape == (10,)\n t['x1'][0] = -200\n t['x2'][1] = -100\n if copy:\n assert x1[0] == 0.\n assert x2[1] == 0.\n else:\n assert x1[0] == -200\n assert x2[1] == -100\n\n\ndef test_add_none_object_column():\n \"\"\"Test fix for a problem introduced in #10636 (see\n https://github.com/astropy/astropy/pull/10636#issuecomment-676847515)\n \"\"\"\n t = Table(data={'a': [1, 2, 3]})\n t['b'] = None\n assert all(val is None for val in t['b'])\n assert t['b'].dtype.kind == 'O'\n\n\n@pytest.mark.usefixtures('table_type')\ndef test_init_from_row_OrderedDict(table_type):\n row1 = OrderedDict([('b', 1), ('a', 0)])\n row2 = {'a': 10, 'b': 20}\n rows12 = [row1, row2]\n row3 = dict([('b', 1), ('a', 0)])\n row4 = dict([('b', 11), ('a', 10)])\n rows34 = [row3, row4]\n t1 = table_type(rows=rows12)\n t2 = table_type(rows=rows34)\n t3 = t2[sorted(t2.colnames)]\n assert t1.colnames == ['b', 'a']\n assert t2.colnames == ['b', 'a']\n assert t3.colnames == ['a', 'b']\n\n\ndef test_init_from_rows_as_generator():\n rows = ((1 + ii, 2 + ii) for ii in range(2))\n t = Table(rows=rows)\n assert np.all(t['col0'] == [1, 2])\n assert np.all(t['col1'] == [2, 3])\n\n\n@pytest.mark.parametrize('dtype', ['fail', 'i4'])\ndef test_init_bad_dtype_in_empty_table(dtype):\n with pytest.raises(ValueError,\n match='type was specified but could not be parsed for column names'):\n Table(dtype=dtype)\n\n\ndef test_init_data_type_not_allowed_to_init_table():\n with pytest.raises(ValueError,\n match=\"Data type not allowed to init Table\"):\n Table('hello')\n\n\ndef test_init_Table_from_list_of_quantity():\n \"\"\"Test fix for #11327\"\"\"\n # Variation on original example in #11327 at the Table level\n data = [{'x': 5 * u.m, 'y': 1 * u.m}, {'x': 10 * u.m, 'y': 3}]\n t = Table(data)\n assert t['x'].unit is u.m\n assert t['y'].unit is None\n assert t['x'].dtype.kind == 'f'\n assert t['y'].dtype.kind == 'O'\n assert np.all(t['x'] == [5, 10])\n assert 
t['y'][0] == 1 * u.m\n assert t['y'][1] == 3\n"}}},{"rowIdx":1349,"cells":{"hash":{"kind":"string","value":"20c9f57c2eb6741c22842bf5418cc1eca996f04436a249c8d1e37c0a28bb84f2"},"content":{"kind":"string","value":"import os\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom astropy.table.scripts import showtable\n\nROOT = os.path.abspath(os.path.dirname(__file__))\nASCII_ROOT = os.path.join(ROOT, '..', '..', 'io', 'ascii', 'tests')\nFITS_ROOT = os.path.join(ROOT, '..', '..', 'io', 'fits', 'tests')\nVOTABLE_ROOT = os.path.join(ROOT, '..', '..', 'io', 'votable', 'tests')\n\n\ndef test_missing_file(capsys):\n showtable.main(['foobar.fits'])\n out, err = capsys.readouterr()\n assert err.startswith(\"ERROR: [Errno 2] No such file or directory: \"\n \"'foobar.fits'\")\n\n\ndef test_info(capsys):\n showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--info'])\n out, err = capsys.readouterr()\n assert out.splitlines() == ['',\n ' name dtype ',\n '------ -------',\n 'target bytes20',\n ' V_mag float32']\n\n\ndef test_stats(capsys):\n showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'), '--stats'])\n out, err = capsys.readouterr()\n expected = ['
',\n ' name mean std min max ',\n '------ ------- ------- ---- ----',\n 'target -- -- -- --',\n ' V_mag 12.866[0-9]? 1.72111 11.1 15.2']\n\n out = out.splitlines()\n assert out[:4] == expected[:4]\n # Here we use re.match as in some cases one of the values above is\n # platform-dependent.\n assert re.match(expected[4], out[4]) is not None\n\n\ndef test_fits(capsys):\n showtable.main([os.path.join(FITS_ROOT, 'data/table.fits')])\n out, err = capsys.readouterr()\n assert out.splitlines() == [' target V_mag',\n '------- -----',\n 'NGC1001 11.1',\n 'NGC1002 12.3',\n 'NGC1003 15.2']\n\n\ndef test_fits_hdu(capsys):\n from astropy.units import UnitsWarning\n with pytest.warns(UnitsWarning):\n showtable.main([\n os.path.join(FITS_ROOT, 'data/zerowidth.fits'),\n '--hdu', 'AIPS OF',\n ])\n\n out, err = capsys.readouterr()\n assert out.startswith(\n ' TIME SOURCE ID ANTENNA NO. SUBARRAY FREQ ID ANT FLAG STATUS 1\\n'\n ' DAYS \\n'\n '---------- --------- ----------- -------- ------- -------- --------\\n'\n '0.14438657 1 10 1 1 4 4\\n')\n\n\ndef test_csv(capsys):\n showtable.main([os.path.join(ASCII_ROOT, 'data/simple_csv.csv')])\n out, err = capsys.readouterr()\n assert out.splitlines() == [' a b c ',\n '--- --- ---',\n ' 1 2 3',\n ' 4 5 6']\n\n\ndef test_ascii_format(capsys):\n showtable.main([os.path.join(ASCII_ROOT, 'data/commented_header.dat'),\n '--format', 'ascii.commented_header'])\n out, err = capsys.readouterr()\n assert out.splitlines() == [' a b c ',\n '--- --- ---',\n ' 1 2 3',\n ' 4 5 6']\n\n\ndef test_ascii_delimiter(capsys):\n showtable.main([os.path.join(ASCII_ROOT, 'data/simple2.txt'),\n '--format', 'ascii', '--delimiter', '|'])\n out, err = capsys.readouterr()\n assert out.splitlines() == [\n \"obsid redshift X Y object rad \",\n \"----- -------- ---- ---- ----------- ----\",\n \" 3102 0.32 4167 4085 Q1250+568-A 9.0\",\n \" 3102 0.32 4706 3916 Q1250+568-B 14.0\",\n \" 877 0.22 4378 3892 'Source 82' 12.5\",\n ]\n\n\ndef test_votable(capsys):\n with 
np.errstate(over=\"ignore\"):\n # https://github.com/astropy/astropy/issues/13341\n showtable.main([os.path.join(VOTABLE_ROOT, 'data/regression.xml'),\n '--table-id', 'main_table', '--max-width', '50'])\n out, err = capsys.readouterr()\n assert out.splitlines() == [\n ' string_test string_test_2 ... bitarray2 ',\n '----------------- ------------- ... -------------',\n ' String & test Fixed stri ... True .. False',\n 'String & test 0123456789 ... -- .. --',\n ' XXXX XXXX ... -- .. --',\n ' ... -- .. --',\n ' ... -- .. --']\n\n\ndef test_max_lines(capsys):\n showtable.main([os.path.join(ASCII_ROOT, 'data/cds2.dat'),\n '--format', 'ascii.cds', '--max-lines', '7',\n '--max-width', '30'])\n out, err = capsys.readouterr()\n assert out.splitlines() == [\n ' SST ... Note',\n ' ... ',\n '--------------- ... ----',\n '041314.1+281910 ... --',\n ' ... ... ...',\n '044427.1+251216 ... --',\n '044642.6+245903 ... --',\n 'Length = 215 rows',\n ]\n\n\ndef test_show_dtype(capsys):\n showtable.main([os.path.join(FITS_ROOT, 'data/table.fits'),\n '--show-dtype'])\n out, err = capsys.readouterr()\n assert out.splitlines() == [\n ' target V_mag ',\n 'bytes20 float32',\n '------- -------',\n 'NGC1001 11.1',\n 'NGC1002 12.3',\n 'NGC1003 15.2',\n ]\n\n\ndef test_hide_unit(capsys):\n showtable.main([os.path.join(ASCII_ROOT, 'data/cds.dat'),\n '--format', 'ascii.cds'])\n out, err = capsys.readouterr()\n assert out.splitlines() == [\n 'Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ',\n ' h min s deg arcmin arcsec mag GMsun',\n '----- --- --- ----- --- --- ------ ------ ----- ----- --- -----',\n ' 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35',\n ]\n\n showtable.main([os.path.join(ASCII_ROOT, 'data/cds.dat'),\n '--format', 'ascii.cds', '--hide-unit'])\n out, err = capsys.readouterr()\n assert out.splitlines() == [\n 'Index RAh RAm RAs DE- DEd DEm DEs Match Class AK Fit ',\n '----- --- --- ----- --- --- --- --- ----- ----- --- ----',\n ' 1 3 28 39.09 + 31 6 1.9 -- I* -- 1.35',\n 
]\n"}}},{"rowIdx":1350,"cells":{"hash":{"kind":"string","value":"53a02884d2a66520a099cb88256d52de64e52392aa95320054f5fd01e56b00ae"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\nfrom astropy import table\nfrom astropy.table import pprint\n\n\nclass MyRow(table.Row):\n def __str__(self):\n return str(self.as_void())\n\n\nclass MyColumn(table.Column):\n pass\n\n\nclass MyMaskedColumn(table.MaskedColumn):\n pass\n\n\nclass MyTableColumns(table.TableColumns):\n pass\n\n\nclass MyTableFormatter(pprint.TableFormatter):\n pass\n\n\nclass MyTable(table.Table):\n Row = MyRow\n Column = MyColumn\n MaskedColumn = MyMaskedColumn\n TableColumns = MyTableColumns\n TableFormatter = MyTableFormatter\n\n\ndef test_simple_subclass():\n t = MyTable([[1, 2], [3, 4]])\n row = t[0]\n assert isinstance(row, MyRow)\n assert isinstance(t['col0'], MyColumn)\n assert isinstance(t.columns, MyTableColumns)\n assert isinstance(t.formatter, MyTableFormatter)\n\n t2 = MyTable(t)\n row = t2[0]\n assert isinstance(row, MyRow)\n assert str(row) == '(1, 3)'\n\n t3 = table.Table(t)\n row = t3[0]\n assert not isinstance(row, MyRow)\n assert str(row) != '(1, 3)'\n\n t = MyTable([[1, 2], [3, 4]], masked=True)\n row = t[0]\n assert isinstance(row, MyRow)\n assert str(row) == '(1, 3)'\n assert isinstance(t['col0'], MyMaskedColumn)\n assert isinstance(t.formatter, MyTableFormatter)\n\n\nclass ParamsRow(table.Row):\n \"\"\"\n Row class that allows access to an arbitrary dict of parameters\n stored as a dict object in the ``params`` column.\n \"\"\"\n\n def __getitem__(self, item):\n if item not in self.colnames:\n return super().__getitem__('params')[item]\n else:\n return super().__getitem__(item)\n\n def keys(self):\n out = [name for name in self.colnames if name != 'params']\n params = [key.lower() for key in sorted(self['params'])]\n return out + params\n\n def values(self):\n return [self[key] for key in self.keys()]\n\n\nclass 
ParamsTable(table.Table):\n Row = ParamsRow\n\n\ndef test_params_table():\n t = ParamsTable(names=['a', 'b', 'params'], dtype=['i', 'f', 'O'])\n t.add_row((1, 2.0, {'x': 1.5, 'y': 2.5}))\n t.add_row((2, 3.0, {'z': 'hello', 'id': 123123}))\n assert t['params'][0] == {'x': 1.5, 'y': 2.5}\n assert t[0]['params'] == {'x': 1.5, 'y': 2.5}\n assert t[0]['y'] == 2.5\n assert t[1]['id'] == 123123\n assert list(t[1].keys()) == ['a', 'b', 'id', 'z']\n assert list(t[1].values()) == [2, 3.0, 123123, 'hello']\n"}}},{"rowIdx":1351,"cells":{"hash":{"kind":"string","value":"aee33a85b110525749e09d487b63f2d9593000dcb6541834e8b6443dc2b19db9"},"content":{"kind":"string","value":"import numpy as np\nimport pickle\n\nfrom astropy.table import Table, Column, MaskedColumn, QTable\nfrom astropy.table.table_helpers import simple_table\nfrom astropy.units import Quantity, deg\nfrom astropy.time import Time\nfrom astropy.coordinates import SkyCoord\n\n\ndef test_pickle_column(protocol):\n c = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})\n cs = pickle.dumps(c)\n cp = pickle.loads(cs)\n assert np.all(cp == c)\n assert cp.attrs_equal(c)\n assert cp._parent_table is None\n assert repr(c) == repr(cp)\n\n\ndef test_pickle_masked_column(protocol):\n c = MaskedColumn(data=[1, 2], name='a', format='%05d', description='col a', unit='cm',\n meta={'a': 1})\n c.mask[1] = True\n c.fill_value = -99\n\n cs = pickle.dumps(c)\n cp = pickle.loads(cs)\n\n assert np.all(cp._data == c._data)\n assert np.all(cp.mask == c.mask)\n assert cp.attrs_equal(c)\n assert cp.fill_value == -99\n assert cp._parent_table is None\n assert repr(c) == repr(cp)\n\n\ndef test_pickle_multidimensional_column(protocol):\n \"\"\"Regression test for https://github.com/astropy/astropy/issues/4098\"\"\"\n\n a = np.zeros((3, 2))\n c = Column(a, name='a')\n cs = pickle.dumps(c)\n cp = pickle.loads(cs)\n\n assert np.all(c == cp)\n assert c.shape == cp.shape\n assert cp.attrs_equal(c)\n assert 
repr(c) == repr(cp)\n\n\ndef test_pickle_table(protocol):\n a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})\n b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm',\n meta={'b': 1})\n\n for table_class in Table, QTable:\n t = table_class([a, b], meta={'a': 1, 'b': Quantity(10, unit='s')})\n t['c'] = Quantity([1, 2], unit='m')\n t['d'] = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])\n t['e'] = SkyCoord([125.0, 180.0] * deg, [-45.0, 36.5] * deg)\n\n ts = pickle.dumps(t)\n tp = pickle.loads(ts)\n\n assert tp.__class__ is table_class\n assert np.all(tp['a'] == t['a'])\n assert np.all(tp['b'] == t['b'])\n\n # test mixin columns\n assert np.all(tp['c'] == t['c'])\n assert np.all(tp['d'] == t['d'])\n assert np.all(tp['e'].ra == t['e'].ra)\n assert np.all(tp['e'].dec == t['e'].dec)\n assert type(tp['c']) is type(t['c']) # nopep8\n assert type(tp['d']) is type(t['d']) # nopep8\n assert type(tp['e']) is type(t['e']) # nopep8\n assert tp.meta == t.meta\n assert type(tp) is type(t)\n\n assert isinstance(tp['c'], Quantity if (table_class is QTable) else Column)\n\n\ndef test_pickle_masked_table(protocol):\n a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})\n b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm',\n meta={'b': 1})\n t = Table([a, b], meta={'a': 1}, masked=True)\n t['a'].mask[1] = True\n t['a'].fill_value = -99\n\n ts = pickle.dumps(t)\n tp = pickle.loads(ts)\n\n for colname in ('a', 'b'):\n for attr in ('_data', 'mask', 'fill_value'):\n assert np.all(getattr(tp[colname], attr) == getattr(tp[colname], attr))\n\n assert tp['a'].attrs_equal(t['a'])\n assert tp['b'].attrs_equal(t['b'])\n assert tp.meta == t.meta\n\n\ndef test_pickle_indexed_table(protocol):\n \"\"\"\n Ensure that any indices that have been added will survive pickling.\n \"\"\"\n t = simple_table()\n t.add_index('a')\n t.add_index(['a', 
'b'])\n ts = pickle.dumps(t)\n tp = pickle.loads(ts)\n\n assert len(t.indices) == len(tp.indices)\n for index, indexp in zip(t.indices, tp.indices):\n assert np.all(index.data.data == indexp.data.data)\n assert index.data.data.colnames == indexp.data.data.colnames\n"}}},{"rowIdx":1352,"cells":{"hash":{"kind":"string","value":"196182825e9df1a9b5b8610afdf8b73d98eca8c93af063676d6d666c394fd0db"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom astropy.table.table_helpers import ArrayWrapper\nfrom astropy.coordinates.earth import EarthLocation\nfrom astropy.units.quantity import Quantity\nfrom collections import OrderedDict\nfrom contextlib import nullcontext\n\nimport pytest\nimport numpy as np\n\nfrom astropy.table import Table, QTable, TableMergeError, Column, MaskedColumn, NdarrayMixin\nfrom astropy.table.operations import _get_out_class, join_skycoord, join_distance\nfrom astropy import units as u\nfrom astropy.utils import metadata\nfrom astropy.utils.metadata import MergeConflictError\nfrom astropy import table\nfrom astropy.time import Time, TimeDelta\nfrom astropy.coordinates import (SkyCoord, SphericalRepresentation,\n UnitSphericalRepresentation,\n CartesianRepresentation,\n BaseRepresentationOrDifferential,\n search_around_3d)\nfrom astropy.coordinates.tests.test_representation import representation_equal\nfrom astropy.coordinates.tests.helper import skycoord_equal\nfrom astropy.utils.compat.optional_deps import HAS_SCIPY # noqa\n\n\ndef sort_eq(list1, list2):\n return sorted(list1) == sorted(list2)\n\n\ndef check_mask(col, exp_mask):\n \"\"\"Check that col.mask == exp_mask\"\"\"\n if hasattr(col, 'mask'):\n # Coerce expected mask into dtype of col.mask. 
In particular this is\n # needed for types like EarthLocation where the mask is a structured\n # array.\n exp_mask = np.array(exp_mask).astype(col.mask.dtype)\n out = np.all(col.mask == exp_mask)\n else:\n # With no mask the check is OK if all the expected mask values\n # are False (i.e. no auto-conversion to MaskedQuantity if it was\n # not required by the join).\n out = np.all(exp_mask == False)\n return out\n\n\nclass TestJoin():\n\n def _setup(self, t_cls=Table):\n lines1 = [' a b c ',\n ' 0 foo L1',\n ' 1 foo L2',\n ' 1 bar L3',\n ' 2 bar L4']\n lines2 = [' a b d ',\n ' 1 foo R1',\n ' 1 foo R2',\n ' 2 bar R3',\n ' 4 bar R4']\n self.t1 = t_cls.read(lines1, format='ascii')\n self.t2 = t_cls.read(lines2, format='ascii')\n self.t3 = t_cls(self.t2, copy=True)\n\n self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))\n self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))\n self.t3.meta.update(OrderedDict([('b', 3), ('c', [1, 2]), ('d', 2), ('a', 1)]))\n\n self.meta_merge = OrderedDict([('b', [1, 2, 3, 4]),\n ('c', {'a': 1, 'b': 1}),\n ('d', 1),\n ('a', 1)])\n\n def test_table_meta_merge(self, operation_table_type):\n self._setup(operation_table_type)\n out = table.join(self.t1, self.t2, join_type='inner')\n assert out.meta == self.meta_merge\n\n def test_table_meta_merge_conflict(self, operation_table_type):\n self._setup(operation_table_type)\n\n with pytest.warns(metadata.MergeConflictWarning) as w:\n out = table.join(self.t1, self.t3, join_type='inner')\n assert len(w) == 3\n\n assert out.meta == self.t3.meta\n\n with pytest.warns(metadata.MergeConflictWarning) as w:\n out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='warn')\n assert len(w) == 3\n\n assert out.meta == self.t3.meta\n\n out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='silent')\n\n assert out.meta == self.t3.meta\n\n with pytest.raises(MergeConflictError):\n out = table.join(self.t1, self.t3, 
join_type='inner', metadata_conflicts='error')\n\n with pytest.raises(ValueError):\n out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='nonsense')\n\n def test_both_unmasked_inner(self, operation_table_type):\n self._setup(operation_table_type)\n t1 = self.t1\n t2 = self.t2\n\n # Basic join with default parameters (inner join on common keys)\n t12 = table.join(t1, t2)\n assert type(t12) is operation_table_type\n assert type(t12['a']) is type(t1['a']) # noqa\n assert type(t12['b']) is type(t1['b']) # noqa\n assert type(t12['c']) is type(t1['c']) # noqa\n assert type(t12['d']) is type(t2['d']) # noqa\n assert t12.masked is False\n assert sort_eq(t12.pformat(), [' a b c d ',\n '--- --- --- ---',\n ' 1 foo L2 R1',\n ' 1 foo L2 R2',\n ' 2 bar L4 R3'])\n # Table meta merged properly\n assert t12.meta == self.meta_merge\n\n def test_both_unmasked_left_right_outer(self, operation_table_type):\n if operation_table_type is QTable:\n pytest.xfail('Quantity columns do not support masking.')\n self._setup(operation_table_type)\n t1 = self.t1\n t2 = self.t2\n\n # Left join\n t12 = table.join(t1, t2, join_type='left')\n assert t12.has_masked_columns is True\n assert t12.masked is False\n for name in ('a', 'b', 'c'):\n assert type(t12[name]) is Column\n assert type(t12['d']) is MaskedColumn\n assert sort_eq(t12.pformat(), [' a b c d ',\n '--- --- --- ---',\n ' 0 foo L1 --',\n ' 1 bar L3 --',\n ' 1 foo L2 R1',\n ' 1 foo L2 R2',\n ' 2 bar L4 R3'])\n\n # Right join\n t12 = table.join(t1, t2, join_type='right')\n assert t12.has_masked_columns is True\n assert t12.masked is False\n assert sort_eq(t12.pformat(), [' a b c d ',\n '--- --- --- ---',\n ' 1 foo L2 R1',\n ' 1 foo L2 R2',\n ' 2 bar L4 R3',\n ' 4 bar -- R4'])\n\n # Outer join\n t12 = table.join(t1, t2, join_type='outer')\n assert t12.has_masked_columns is True\n assert t12.masked is False\n assert sort_eq(t12.pformat(), [' a b c d ',\n '--- --- --- ---',\n ' 0 foo L1 --',\n ' 1 bar L3 --',\n ' 1 foo L2 
R1',\n ' 1 foo L2 R2',\n ' 2 bar L4 R3',\n ' 4 bar -- R4'])\n\n # Check that the common keys are 'a', 'b'\n t12a = table.join(t1, t2, join_type='outer')\n t12b = table.join(t1, t2, join_type='outer', keys=['a', 'b'])\n assert np.all(t12a.as_array() == t12b.as_array())\n\n def test_both_unmasked_single_key_inner(self, operation_table_type):\n self._setup(operation_table_type)\n t1 = self.t1\n t2 = self.t2\n\n # Inner join on 'a' column\n t12 = table.join(t1, t2, keys='a')\n assert type(t12) is operation_table_type\n assert type(t12['a']) is type(t1['a']) # noqa\n assert type(t12['b_1']) is type(t1['b']) # noqa\n assert type(t12['c']) is type(t1['c']) # noqa\n assert type(t12['b_2']) is type(t2['b']) # noqa\n assert type(t12['d']) is type(t2['d']) # noqa\n assert t12.masked is False\n assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',\n '--- --- --- --- ---',\n ' 1 foo L2 foo R1',\n ' 1 foo L2 foo R2',\n ' 1 bar L3 foo R1',\n ' 1 bar L3 foo R2',\n ' 2 bar L4 bar R3'])\n\n def test_both_unmasked_single_key_left_right_outer(self, operation_table_type):\n if operation_table_type is QTable:\n pytest.xfail('Quantity columns do not support masking.')\n self._setup(operation_table_type)\n t1 = self.t1\n t2 = self.t2\n\n # Left join\n t12 = table.join(t1, t2, join_type='left', keys='a')\n assert t12.has_masked_columns is True\n assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',\n '--- --- --- --- ---',\n ' 0 foo L1 -- --',\n ' 1 foo L2 foo R1',\n ' 1 foo L2 foo R2',\n ' 1 bar L3 foo R1',\n ' 1 bar L3 foo R2',\n ' 2 bar L4 bar R3'])\n\n # Right join\n t12 = table.join(t1, t2, join_type='right', keys='a')\n assert t12.has_masked_columns is True\n assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',\n '--- --- --- --- ---',\n ' 1 foo L2 foo R1',\n ' 1 foo L2 foo R2',\n ' 1 bar L3 foo R1',\n ' 1 bar L3 foo R2',\n ' 2 bar L4 bar R3',\n ' 4 -- -- bar R4'])\n\n # Outer join\n t12 = table.join(t1, t2, join_type='outer', keys='a')\n assert t12.has_masked_columns is True\n assert 
sort_eq(t12.pformat(), [' a b_1 c b_2 d ',\n '--- --- --- --- ---',\n ' 0 foo L1 -- --',\n ' 1 foo L2 foo R1',\n ' 1 foo L2 foo R2',\n ' 1 bar L3 foo R1',\n ' 1 bar L3 foo R2',\n ' 2 bar L4 bar R3',\n ' 4 -- -- bar R4'])\n\n def test_masked_unmasked(self, operation_table_type):\n if operation_table_type is QTable:\n pytest.xfail('Quantity columns do not support masking.')\n self._setup(operation_table_type)\n t1 = self.t1\n t1m = operation_table_type(self.t1, masked=True)\n t2 = self.t2\n\n # Result table is never masked\n t1m2 = table.join(t1m, t2, join_type='inner')\n assert t1m2.masked is False\n\n # Result should match non-masked result\n t12 = table.join(t1, t2)\n assert np.all(t12.as_array() == np.array(t1m2))\n\n # Mask out some values in left table and make sure they propagate\n t1m['b'].mask[1] = True\n t1m['c'].mask[2] = True\n t1m2 = table.join(t1m, t2, join_type='inner', keys='a')\n assert sort_eq(t1m2.pformat(), [' a b_1 c b_2 d ',\n '--- --- --- --- ---',\n ' 1 -- L2 foo R1',\n ' 1 -- L2 foo R2',\n ' 1 bar -- foo R1',\n ' 1 bar -- foo R2',\n ' 2 bar L4 bar R3'])\n\n t21m = table.join(t2, t1m, join_type='inner', keys='a')\n assert sort_eq(t21m.pformat(), [' a b_1 d b_2 c ',\n '--- --- --- --- ---',\n ' 1 foo R2 -- L2',\n ' 1 foo R2 bar --',\n ' 1 foo R1 -- L2',\n ' 1 foo R1 bar --',\n ' 2 bar R3 bar L4'])\n\n def test_masked_masked(self, operation_table_type):\n self._setup(operation_table_type)\n \"\"\"Two masked tables\"\"\"\n if operation_table_type is QTable:\n pytest.xfail('Quantity columns do not support masking.')\n t1 = self.t1\n t1m = operation_table_type(self.t1, masked=True)\n t2 = self.t2\n t2m = operation_table_type(self.t2, masked=True)\n\n # Result table is never masked but original column types are preserved\n t1m2m = table.join(t1m, t2m, join_type='inner')\n assert t1m2m.masked is False\n for col in t1m2m.itercols():\n assert type(col) is MaskedColumn\n\n # Result should match non-masked result\n t12 = table.join(t1, t2)\n assert 
np.all(t12.as_array() == np.array(t1m2m))\n\n # Mask out some values in both tables and make sure they propagate\n t1m['b'].mask[1] = True\n t1m['c'].mask[2] = True\n t2m['d'].mask[2] = True\n t1m2m = table.join(t1m, t2m, join_type='inner', keys='a')\n assert sort_eq(t1m2m.pformat(), [' a b_1 c b_2 d ',\n '--- --- --- --- ---',\n ' 1 -- L2 foo R1',\n ' 1 -- L2 foo R2',\n ' 1 bar -- foo R1',\n ' 1 bar -- foo R2',\n ' 2 bar L4 bar --'])\n\n def test_classes(self):\n \"\"\"Ensure that classes and subclasses get through as expected\"\"\"\n class MyCol(Column):\n pass\n\n class MyMaskedCol(MaskedColumn):\n pass\n\n t1 = Table()\n t1['a'] = MyCol([1])\n t1['b'] = MyCol([2])\n t1['c'] = MyMaskedCol([3])\n\n t2 = Table()\n t2['a'] = Column([1, 2])\n t2['d'] = MyCol([3, 4])\n t2['e'] = MyMaskedCol([5, 6])\n\n t12 = table.join(t1, t2, join_type='inner')\n for name, exp_type in (('a', MyCol), ('b', MyCol), ('c', MyMaskedCol),\n ('d', MyCol), ('e', MyMaskedCol)):\n assert type(t12[name] is exp_type)\n\n t21 = table.join(t2, t1, join_type='left')\n # Note col 'b' gets upgraded from MyCol to MaskedColumn since it needs to be\n # masked, but col 'c' stays since MyMaskedCol supports masking.\n for name, exp_type in (('a', MyCol), ('b', MaskedColumn), ('c', MyMaskedCol),\n ('d', MyCol), ('e', MyMaskedCol)):\n assert type(t21[name] is exp_type)\n\n def test_col_rename(self, operation_table_type):\n self._setup(operation_table_type)\n \"\"\"\n Test auto col renaming when there is a conflict. 
Use\n non-default values of uniq_col_name and table_names.\n \"\"\"\n t1 = self.t1\n t2 = self.t2\n t12 = table.join(t1, t2, uniq_col_name='x_{table_name}_{col_name}_y',\n table_names=['L', 'R'], keys='a')\n assert t12.colnames == ['a', 'x_L_b_y', 'c', 'x_R_b_y', 'd']\n\n def test_rename_conflict(self, operation_table_type):\n self._setup(operation_table_type)\n \"\"\"\n Test that auto-column rename fails because of a conflict\n with an existing column\n \"\"\"\n t1 = self.t1\n t2 = self.t2\n t1['b_1'] = 1 # Add a new column b_1 that will conflict with auto-rename\n with pytest.raises(TableMergeError):\n table.join(t1, t2, keys='a')\n\n def test_missing_keys(self, operation_table_type):\n self._setup(operation_table_type)\n \"\"\"Merge on a key column that doesn't exist\"\"\"\n t1 = self.t1\n t2 = self.t2\n with pytest.raises(TableMergeError):\n table.join(t1, t2, keys=['a', 'not there'])\n\n def test_bad_join_type(self, operation_table_type):\n self._setup(operation_table_type)\n \"\"\"Bad join_type input\"\"\"\n t1 = self.t1\n t2 = self.t2\n with pytest.raises(ValueError):\n table.join(t1, t2, join_type='illegal value')\n\n def test_no_common_keys(self, operation_table_type):\n self._setup(operation_table_type)\n \"\"\"Merge tables with no common keys\"\"\"\n t1 = self.t1\n t2 = self.t2\n del t1['a']\n del t1['b']\n del t2['a']\n del t2['b']\n with pytest.raises(TableMergeError):\n table.join(t1, t2)\n\n def test_masked_key_column(self, operation_table_type):\n self._setup(operation_table_type)\n \"\"\"Merge on a key column that has a masked element\"\"\"\n if operation_table_type is QTable:\n pytest.xfail('Quantity columns do not support masking.')\n t1 = self.t1\n t2 = operation_table_type(self.t2, masked=True)\n table.join(t1, t2) # OK\n t2['a'].mask[0] = True\n with pytest.raises(TableMergeError):\n table.join(t1, t2)\n\n def test_col_meta_merge(self, operation_table_type):\n self._setup(operation_table_type)\n t1 = self.t1\n t2 = self.t2\n 
t2.rename_column('d', 'c') # force col conflict and renaming\n meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])\n meta2 = OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])\n\n # Key col 'a', should first value ('cm')\n t1['a'].unit = 'cm'\n t2['a'].unit = 'm'\n # Key col 'b', take first value 't1_b'\n t1['b'].info.description = 't1_b'\n # Key col 'b', take first non-empty value 't1_b'\n t2['b'].info.format = '%6s'\n # Key col 'a', should be merged meta\n t1['a'].info.meta = meta1\n t2['a'].info.meta = meta2\n # Key col 'b', should be meta2\n t2['b'].info.meta = meta2\n\n # All these should pass through\n t1['c'].info.format = '%3s'\n t1['c'].info.description = 't1_c'\n\n t2['c'].info.format = '%6s'\n t2['c'].info.description = 't2_c'\n\n if operation_table_type is Table:\n ctx = pytest.warns(metadata.MergeConflictWarning, match=r\"In merged column 'a' the 'unit' attribute does not match \\(cm != m\\)\") # noqa\n else:\n ctx = nullcontext()\n\n with ctx:\n t12 = table.join(t1, t2, keys=['a', 'b'])\n\n assert t12['a'].unit == 'm'\n assert t12['b'].info.description == 't1_b'\n assert t12['b'].info.format == '%6s'\n assert t12['a'].info.meta == self.meta_merge\n assert t12['b'].info.meta == meta2\n assert t12['c_1'].info.format == '%3s'\n assert t12['c_1'].info.description == 't1_c'\n assert t12['c_2'].info.format == '%6s'\n assert t12['c_2'].info.description == 't2_c'\n\n def test_join_multidimensional(self, operation_table_type):\n self._setup(operation_table_type)\n\n # Regression test for #2984, which was an issue where join did not work\n # on multi-dimensional columns.\n\n t1 = operation_table_type()\n t1['a'] = [1, 2, 3]\n t1['b'] = np.ones((3, 4))\n\n t2 = operation_table_type()\n t2['a'] = [1, 2, 3]\n t2['c'] = [4, 5, 6]\n\n t3 = table.join(t1, t2)\n\n np.testing.assert_allclose(t3['a'], t1['a'])\n np.testing.assert_allclose(t3['b'], t1['b'])\n np.testing.assert_allclose(t3['c'], t2['c'])\n\n def test_join_multidimensional_masked(self, 
operation_table_type):\n self._setup(operation_table_type)\n \"\"\"\n Test for outer join with multidimensional columns where masking is required.\n (Issue #4059).\n \"\"\"\n if operation_table_type is QTable:\n pytest.xfail('Quantity columns do not support masking.')\n\n a = table.MaskedColumn([1, 2, 3], name='a')\n a2 = table.Column([1, 3, 4], name='a')\n b = table.MaskedColumn([[1, 2],\n [3, 4],\n [5, 6]],\n name='b',\n mask=[[1, 0],\n [0, 1],\n [0, 0]])\n c = table.Column([[1, 1],\n [2, 2],\n [3, 3]],\n name='c')\n t1 = operation_table_type([a, b])\n t2 = operation_table_type([a2, c])\n t12 = table.join(t1, t2, join_type='inner')\n\n assert np.all(t12['b'].mask == [[True, False],\n [False, False]])\n assert not hasattr(t12['c'], 'mask')\n\n t12 = table.join(t1, t2, join_type='outer')\n assert np.all(t12['b'].mask == [[True, False],\n [False, True],\n [False, False],\n [True, True]])\n assert np.all(t12['c'].mask == [[False, False],\n [True, True],\n [False, False],\n [False, False]])\n\n def test_mixin_functionality(self, mixin_cols):\n col = mixin_cols['m']\n cls_name = type(col).__name__\n len_col = len(col)\n idx = np.arange(len_col)\n t1 = table.QTable([idx, col], names=['idx', 'm1'])\n t2 = table.QTable([idx, col], names=['idx', 'm2'])\n # Set up join mismatches for different join_type cases\n t1 = t1[[0, 1, 3]]\n t2 = t2[[0, 2, 3]]\n\n # Test inner join, which works for all mixin_cols\n out = table.join(t1, t2, join_type='inner')\n assert len(out) == 2\n assert out['m2'].__class__ is col.__class__\n assert np.all(out['idx'] == [0, 3])\n if cls_name == 'SkyCoord':\n # SkyCoord doesn't support __eq__ so use our own\n assert skycoord_equal(out['m1'], col[[0, 3]])\n assert skycoord_equal(out['m2'], col[[0, 3]])\n elif 'Repr' in cls_name or 'Diff' in cls_name:\n assert np.all(representation_equal(out['m1'], col[[0, 3]]))\n assert np.all(representation_equal(out['m2'], col[[0, 3]]))\n else:\n assert np.all(out['m1'] == col[[0, 3]])\n assert np.all(out['m2'] == 
col[[0, 3]])\n\n # Check for left, right, outer join which requires masking. Works for\n # the listed mixins classes.\n if isinstance(col, (Quantity, Time, TimeDelta)):\n out = table.join(t1, t2, join_type='left')\n assert len(out) == 3\n assert np.all(out['idx'] == [0, 1, 3])\n assert np.all(out['m1'] == t1['m1'])\n assert np.all(out['m2'] == t2['m2'])\n check_mask(out['m1'], [False, False, False])\n check_mask(out['m2'], [False, True, False])\n\n out = table.join(t1, t2, join_type='right')\n assert len(out) == 3\n assert np.all(out['idx'] == [0, 2, 3])\n assert np.all(out['m1'] == t1['m1'])\n assert np.all(out['m2'] == t2['m2'])\n check_mask(out['m1'], [False, True, False])\n check_mask(out['m2'], [False, False, False])\n\n out = table.join(t1, t2, join_type='outer')\n assert len(out) == 4\n assert np.all(out['idx'] == [0, 1, 2, 3])\n assert np.all(out['m1'] == col)\n assert np.all(out['m2'] == col)\n assert check_mask(out['m1'], [False, False, True, False])\n assert check_mask(out['m2'], [False, True, False, False])\n else:\n # Otherwise make sure it fails with the right exception message\n for join_type in ('outer', 'left', 'right'):\n with pytest.raises(NotImplementedError) as err:\n table.join(t1, t2, join_type=join_type)\n assert ('join requires masking' in str(err.value)\n or 'join unavailable' in str(err.value))\n\n def test_cartesian_join(self, operation_table_type):\n t1 = Table(rows=[(1, 'a'),\n (2, 'b')], names=['a', 'b'])\n t2 = Table(rows=[(3, 'c'),\n (4, 'd')], names=['a', 'c'])\n t12 = table.join(t1, t2, join_type='cartesian')\n\n assert t1.colnames == ['a', 'b']\n assert t2.colnames == ['a', 'c']\n assert len(t12) == len(t1) * len(t2)\n assert str(t12).splitlines() == [\n 'a_1 b a_2 c ',\n '--- --- --- ---',\n ' 1 a 3 c',\n ' 1 a 4 d',\n ' 2 b 3 c',\n ' 2 b 4 d']\n\n with pytest.raises(ValueError, match='cannot supply keys for a cartesian join'):\n t12 = table.join(t1, t2, join_type='cartesian', keys='a')\n\n @pytest.mark.skipif('not HAS_SCIPY')\n 
def test_join_with_join_skycoord_sky(self):\n sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg')\n sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg')\n t1 = Table([sc1], names=['sc'])\n t2 = Table([sc2], names=['sc'])\n t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)})\n exp = ['sc_id sc_1 sc_2 ',\n ' deg,deg deg,deg ',\n '----- ------- --------',\n ' 1 1.0,0.0 1.05,0.0',\n ' 1 1.1,0.0 1.05,0.0',\n ' 2 2.0,0.0 2.1,0.0']\n assert str(t12).splitlines() == exp\n\n @pytest.mark.skipif('not HAS_SCIPY')\n @pytest.mark.parametrize('distance_func', ['search_around_3d', search_around_3d])\n def test_join_with_join_skycoord_3d(self, distance_func):\n sc1 = SkyCoord([0, 1, 1.1, 2]*u.deg, [0, 0, 0, 0]*u.deg, [1, 1, 2, 1]*u.m)\n sc2 = SkyCoord([0.5, 1.05, 2.1]*u.deg, [0, 0, 0]*u.deg, [1, 1, 1]*u.m)\n t1 = Table([sc1], names=['sc'])\n t2 = Table([sc2], names=['sc'])\n join_func = join_skycoord(np.deg2rad(0.2) * u.m,\n distance_func=distance_func)\n t12 = table.join(t1, t2, join_funcs={'sc': join_func})\n exp = ['sc_id sc_1 sc_2 ',\n ' deg,deg,m deg,deg,m ',\n '----- ----------- ------------',\n ' 1 1.0,0.0,1.0 1.05,0.0,1.0',\n ' 2 2.0,0.0,1.0 2.1,0.0,1.0']\n assert str(t12).splitlines() == exp\n\n @pytest.mark.skipif('not HAS_SCIPY')\n def test_join_with_join_distance_1d(self):\n c1 = [0, 1, 1.1, 2]\n c2 = [0.5, 1.05, 2.1]\n t1 = Table([c1], names=['col'])\n t2 = Table([c2], names=['col'])\n join_func = join_distance(0.2,\n kdtree_args={'leafsize': 32},\n query_args={'p': 2})\n t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_func})\n exp = ['col_id col_1 col_2',\n '------ ----- -----',\n ' 1 1.0 1.05',\n ' 1 1.1 1.05',\n ' 2 2.0 2.1',\n ' 3 0.0 --',\n ' 4 -- 0.5']\n assert str(t12).splitlines() == exp\n\n @pytest.mark.skipif('not HAS_SCIPY')\n def test_join_with_join_distance_1d_multikey(self):\n from astropy.table.operations import _apply_join_funcs\n\n c1 = [0, 1, 1.1, 1.2, 2]\n id1 = [0, 1, 2, 2, 3]\n o1 = ['a', 'b', 'c', 
'd', 'e']\n c2 = [0.5, 1.05, 2.1]\n id2 = [0, 2, 4]\n o2 = ['z', 'y', 'x']\n t1 = Table([c1, id1, o1], names=['col', 'id', 'o1'])\n t2 = Table([c2, id2, o2], names=['col', 'id', 'o2'])\n join_func = join_distance(0.2)\n join_funcs = {'col': join_func}\n t12 = table.join(t1, t2, join_type='outer', join_funcs=join_funcs)\n exp = ['col_id col_1 id o1 col_2 o2',\n '------ ----- --- --- ----- ---',\n ' 1 1.0 1 b -- --',\n ' 1 1.1 2 c 1.05 y',\n ' 1 1.2 2 d 1.05 y',\n ' 2 2.0 3 e -- --',\n ' 2 -- 4 -- 2.1 x',\n ' 3 0.0 0 a -- --',\n ' 4 -- 0 -- 0.5 z']\n assert str(t12).splitlines() == exp\n\n left, right, keys = _apply_join_funcs(t1, t2, ('col', 'id'), join_funcs)\n assert keys == ('col_id', 'id')\n\n @pytest.mark.skipif('not HAS_SCIPY')\n def test_join_with_join_distance_1d_quantity(self):\n c1 = [0, 1, 1.1, 2] * u.m\n c2 = [500, 1050, 2100] * u.mm\n t1 = QTable([c1], names=['col'])\n t2 = QTable([c2], names=['col'])\n join_func = join_distance(20 * u.cm)\n t12 = table.join(t1, t2, join_funcs={'col': join_func})\n exp = ['col_id col_1 col_2 ',\n ' m mm ',\n '------ ----- ------',\n ' 1 1.0 1050.0',\n ' 1 1.1 1050.0',\n ' 2 2.0 2100.0']\n assert str(t12).splitlines() == exp\n\n # Generate column name conflict\n t2['col_id'] = [0, 0, 0]\n t2['col__id'] = [0, 0, 0]\n t12 = table.join(t1, t2, join_funcs={'col': join_func})\n exp = ['col___id col_1 col_2 col_id col__id',\n ' m mm ',\n '-------- ----- ------ ------ -------',\n ' 1 1.0 1050.0 0 0',\n ' 1 1.1 1050.0 0 0',\n ' 2 2.0 2100.0 0 0']\n assert str(t12).splitlines() == exp\n\n @pytest.mark.skipif('not HAS_SCIPY')\n def test_join_with_join_distance_2d(self):\n c1 = np.array([[0, 1, 1.1, 2],\n [0, 0, 1, 0]]).transpose()\n c2 = np.array([[0.5, 1.05, 2.1],\n [0, 0, 0]]).transpose()\n t1 = Table([c1], names=['col'])\n t2 = Table([c2], names=['col'])\n join_func = join_distance(0.2,\n kdtree_args={'leafsize': 32},\n query_args={'p': 2})\n t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_func})\n exp = 
['col_id col_1 col_2 ',\n f'{t12[\"col_id\"].dtype.name} float64[2] float64[2]', # int32 or int64\n '------ ---------- -----------',\n ' 1 1.0 .. 0.0 1.05 .. 0.0',\n ' 2 2.0 .. 0.0 2.1 .. 0.0',\n ' 3 0.0 .. 0.0 -- .. --',\n ' 4 1.1 .. 1.0 -- .. --',\n ' 5 -- .. -- 0.5 .. 0.0']\n assert t12.pformat(show_dtype=True) == exp\n\n def test_keys_left_right_basic(self):\n \"\"\"Test using the keys_left and keys_right args to specify different\n join keys. This takes the standard test case but renames column 'a'\n to 'x' and 'y' respectively for tables 1 and 2. Then it compares the\n normal join on 'a' to the new join on 'x' and 'y'.\"\"\"\n self._setup()\n\n for join_type in ('inner', 'left', 'right', 'outer'):\n t1 = self.t1.copy()\n t2 = self.t2.copy()\n # Expected is same as joining on 'a' but with names 'x', 'y' instead\n t12_exp = table.join(t1, t2, keys='a', join_type=join_type)\n t12_exp.add_column(t12_exp['a'], name='x', index=1)\n t12_exp.add_column(t12_exp['a'], name='y', index=len(t1.colnames) + 1)\n del t12_exp['a']\n\n # Different key names\n t1.rename_column('a', 'x')\n t2.rename_column('a', 'y')\n keys_left_list = ['x'] # Test string key name\n keys_right_list = [['y']] # Test list of string key names\n if join_type == 'outer':\n # Just do this for the outer join (others are the same)\n keys_left_list.append([t1['x'].tolist()]) # Test list key column\n keys_right_list.append([t2['y']]) # Test Column key column\n\n for keys_left, keys_right in zip(keys_left_list, keys_right_list):\n t12 = table.join(t1, t2, keys_left=keys_left, keys_right=keys_right,\n join_type=join_type)\n\n assert t12.colnames == t12_exp.colnames\n for col in t12.values_equal(t12_exp).itercols():\n assert np.all(col)\n assert t12_exp.meta == t12.meta\n\n def test_keys_left_right_exceptions(self):\n \"\"\"Test exceptions using the keys_left and keys_right args to specify\n different join keys.\n \"\"\"\n self._setup()\n t1 = self.t1\n t2 = self.t2\n\n msg = r\"left table does not have key 
column 'z'\"\n with pytest.raises(ValueError, match=msg):\n table.join(t1, t2, keys_left='z', keys_right=['a'])\n\n msg = r\"left table has different length from key \\[1, 2\\]\"\n with pytest.raises(ValueError, match=msg):\n table.join(t1, t2, keys_left=[[1, 2]], keys_right=['a'])\n\n msg = r\"keys arg must be None if keys_left and keys_right are supplied\"\n with pytest.raises(ValueError, match=msg):\n table.join(t1, t2, keys_left='z', keys_right=['a'], keys='a')\n\n msg = r\"keys_left and keys_right args must have same length\"\n with pytest.raises(ValueError, match=msg):\n table.join(t1, t2, keys_left=['a', 'b'], keys_right=['a'])\n\n msg = r\"keys_left and keys_right must both be provided\"\n with pytest.raises(ValueError, match=msg):\n table.join(t1, t2, keys_left=['a', 'b'])\n\n msg = r\"cannot supply join_funcs arg and keys_left / keys_right\"\n with pytest.raises(ValueError, match=msg):\n table.join(t1, t2, keys_left=['a'], keys_right=['a'], join_funcs={})\n\n def test_join_structured_column(self):\n \"\"\"Regression tests for gh-13271.\"\"\"\n # Two tables with matching names, including a structured column.\n t1 = Table([np.array([(1., 1), (2., 2)], dtype=[('f', 'f8'), ('i', 'i8')]),\n ['one', 'two']], names=['structured', 'string'])\n t2 = Table([np.array([(2., 2), (4., 4)], dtype=[('f', 'f8'), ('i', 'i8')]),\n ['three', 'four']], names=['structured', 'string'])\n t12 = table.join(t1, t2, ['structured'], join_type='outer')\n assert t12.pformat() == [\n 'structured [f, i] string_1 string_2',\n '----------------- -------- --------',\n ' (1., 1) one --',\n ' (2., 2) two three',\n ' (4., 4) -- four']\n\n\nclass TestSetdiff():\n\n def _setup(self, t_cls=Table):\n lines1 = [' a b ',\n ' 0 foo ',\n ' 1 foo ',\n ' 1 bar ',\n ' 2 bar ']\n lines2 = [' a b ',\n ' 0 foo ',\n ' 3 foo ',\n ' 4 bar ',\n ' 2 bar ']\n lines3 = [' a b d ',\n ' 0 foo R1',\n ' 8 foo R2',\n ' 1 bar R3',\n ' 4 bar R4']\n self.t1 = t_cls.read(lines1, format='ascii')\n self.t2 = 
t_cls.read(lines2, format='ascii')\n self.t3 = t_cls.read(lines3, format='ascii')\n\n def test_default_same_columns(self, operation_table_type):\n self._setup(operation_table_type)\n out = table.setdiff(self.t1, self.t2)\n assert type(out['a']) is type(self.t1['a']) # noqa\n assert type(out['b']) is type(self.t1['b']) # noqa\n assert out.pformat() == [' a b ',\n '--- ---',\n ' 1 bar',\n ' 1 foo']\n\n def test_default_same_tables(self, operation_table_type):\n self._setup(operation_table_type)\n out = table.setdiff(self.t1, self.t1)\n\n assert type(out['a']) is type(self.t1['a']) # noqa\n assert type(out['b']) is type(self.t1['b']) # noqa\n assert out.pformat() == [' a b ',\n '--- ---']\n\n def test_extra_col_left_table(self, operation_table_type):\n self._setup(operation_table_type)\n\n with pytest.raises(ValueError):\n table.setdiff(self.t3, self.t1)\n\n def test_extra_col_right_table(self, operation_table_type):\n self._setup(operation_table_type)\n out = table.setdiff(self.t1, self.t3)\n\n assert type(out['a']) is type(self.t1['a']) # noqa\n assert type(out['b']) is type(self.t1['b']) # noqa\n assert out.pformat() == [' a b ',\n '--- ---',\n ' 1 foo',\n ' 2 bar']\n\n def test_keys(self, operation_table_type):\n self._setup(operation_table_type)\n out = table.setdiff(self.t3, self.t1, keys=['a', 'b'])\n\n assert type(out['a']) is type(self.t1['a']) # noqa\n assert type(out['b']) is type(self.t1['b']) # noqa\n assert out.pformat() == [' a b d ',\n '--- --- ---',\n ' 4 bar R4',\n ' 8 foo R2']\n\n def test_missing_key(self, operation_table_type):\n self._setup(operation_table_type)\n\n with pytest.raises(ValueError):\n table.setdiff(self.t3, self.t1, keys=['a', 'd'])\n\n\nclass TestVStack():\n\n def _setup(self, t_cls=Table):\n self.t1 = t_cls.read([' a b',\n ' 0. foo',\n ' 1. bar'], format='ascii')\n\n self.t2 = t_cls.read([' a b c',\n ' 2. pez 4',\n ' 3. sez 5'], format='ascii')\n\n self.t3 = t_cls.read([' a b',\n ' 4. 7',\n ' 5. 8',\n ' 6. 
9'], format='ascii')\n self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)\n\n # The following table has meta-data that conflicts with t1\n self.t5 = t_cls(self.t1, copy=True)\n\n self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))\n self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))\n self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))\n self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)]))\n self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]),\n ('c', {'a': 1, 'b': 1, 'c': 1}),\n ('d', 1),\n ('a', 1),\n ('e', 1)])\n\n def test_validate_join_type(self):\n self._setup()\n with pytest.raises(TypeError, match='Did you accidentally call vstack'):\n table.vstack(self.t1, self.t2)\n\n def test_stack_rows(self, operation_table_type):\n self._setup(operation_table_type)\n t2 = self.t1.copy()\n t2.meta.clear()\n out = table.vstack([self.t1, t2[1]])\n assert type(out['a']) is type(self.t1['a']) # noqa\n assert type(out['b']) is type(self.t1['b']) # noqa\n assert out.pformat() == [' a b ',\n '--- ---',\n '0.0 foo',\n '1.0 bar',\n '1.0 bar']\n\n def test_stack_table_column(self, operation_table_type):\n self._setup(operation_table_type)\n t2 = self.t1.copy()\n t2.meta.clear()\n out = table.vstack([self.t1, t2['a']])\n assert out.masked is False\n assert out.pformat() == [' a b ',\n '--- ---',\n '0.0 foo',\n '1.0 bar',\n '0.0 --',\n '1.0 --']\n\n def test_table_meta_merge(self, operation_table_type):\n self._setup(operation_table_type)\n out = table.vstack([self.t1, self.t2, self.t4], join_type='inner')\n assert out.meta == self.meta_merge\n\n def test_table_meta_merge_conflict(self, operation_table_type):\n self._setup(operation_table_type)\n\n with pytest.warns(metadata.MergeConflictWarning) as w:\n out = table.vstack([self.t1, self.t5], join_type='inner')\n assert len(w) == 2\n\n assert out.meta == self.t5.meta\n\n with pytest.warns(metadata.MergeConflictWarning) as w:\n out 
= table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn')\n assert len(w) == 2\n\n assert out.meta == self.t5.meta\n\n out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent')\n\n assert out.meta == self.t5.meta\n\n with pytest.raises(MergeConflictError):\n out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error')\n\n with pytest.raises(ValueError):\n out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense')\n\n def test_bad_input_type(self, operation_table_type):\n self._setup(operation_table_type)\n with pytest.raises(ValueError):\n table.vstack([])\n with pytest.raises(TypeError):\n table.vstack(1)\n with pytest.raises(TypeError):\n table.vstack([self.t2, 1])\n with pytest.raises(ValueError):\n table.vstack([self.t1, self.t2], join_type='invalid join type')\n\n def test_stack_basic_inner(self, operation_table_type):\n self._setup(operation_table_type)\n t1 = self.t1\n t2 = self.t2\n t4 = self.t4\n\n t12 = table.vstack([t1, t2], join_type='inner')\n assert t12.masked is False\n assert type(t12) is operation_table_type\n assert type(t12['a']) is type(t1['a']) # noqa\n assert type(t12['b']) is type(t1['b']) # noqa\n assert t12.pformat() == [' a b ',\n '--- ---',\n '0.0 foo',\n '1.0 bar',\n '2.0 pez',\n '3.0 sez']\n\n t124 = table.vstack([t1, t2, t4], join_type='inner')\n assert type(t124) is operation_table_type\n assert type(t12['a']) is type(t1['a']) # noqa\n assert type(t12['b']) is type(t1['b']) # noqa\n assert t124.pformat() == [' a b ',\n '--- ---',\n '0.0 foo',\n '1.0 bar',\n '2.0 pez',\n '3.0 sez',\n '0.0 foo',\n '1.0 bar']\n\n def test_stack_basic_outer(self, operation_table_type):\n if operation_table_type is QTable:\n pytest.xfail('Quantity columns do not support masking.')\n self._setup(operation_table_type)\n t1 = self.t1\n t2 = self.t2\n t4 = self.t4\n t12 = table.vstack([t1, t2], join_type='outer')\n assert t12.masked is False\n assert 
t12.pformat() == [' a b c ',\n '--- --- ---',\n '0.0 foo --',\n '1.0 bar --',\n '2.0 pez 4',\n '3.0 sez 5']\n\n t124 = table.vstack([t1, t2, t4], join_type='outer')\n assert t124.masked is False\n assert t124.pformat() == [' a b c ',\n '--- --- ---',\n '0.0 foo --',\n '1.0 bar --',\n '2.0 pez 4',\n '3.0 sez 5',\n '0.0 foo --',\n '1.0 bar --']\n\n def test_stack_incompatible(self, operation_table_type):\n self._setup(operation_table_type)\n with pytest.raises(TableMergeError) as excinfo:\n table.vstack([self.t1, self.t3], join_type='inner')\n assert (\"The 'b' columns have incompatible types: {}\"\n .format([self.t1['b'].dtype.name, self.t3['b'].dtype.name])\n in str(excinfo.value))\n\n with pytest.raises(TableMergeError) as excinfo:\n table.vstack([self.t1, self.t3], join_type='outer')\n assert \"The 'b' columns have incompatible types:\" in str(excinfo.value)\n\n with pytest.raises(TableMergeError):\n table.vstack([self.t1, self.t2], join_type='exact')\n\n t1_reshape = self.t1.copy()\n t1_reshape['b'].shape = [2, 1]\n with pytest.raises(TableMergeError) as excinfo:\n table.vstack([self.t1, t1_reshape])\n assert \"have different shape\" in str(excinfo.value)\n\n def test_vstack_one_masked(self, operation_table_type):\n if operation_table_type is QTable:\n pytest.xfail('Quantity columns do not support masking.')\n self._setup(operation_table_type)\n t1 = self.t1\n t4 = self.t4\n t4['b'].mask[1] = True\n t14 = table.vstack([t1, t4])\n assert t14.masked is False\n assert t14.pformat() == [' a b ',\n '--- ---',\n '0.0 foo',\n '1.0 bar',\n '0.0 foo',\n '1.0 --']\n\n def test_col_meta_merge_inner(self, operation_table_type):\n self._setup(operation_table_type)\n t1 = self.t1\n t2 = self.t2\n t4 = self.t4\n\n # Key col 'a', should last value ('km')\n t1['a'].info.unit = 'cm'\n t2['a'].info.unit = 'm'\n t4['a'].info.unit = 'km'\n\n # Key col 'a' format should take last when all match\n t1['a'].info.format = '%f'\n t2['a'].info.format = '%f'\n t4['a'].info.format = '%f'\n\n 
# Key col 'b', take first value 't1_b'\n t1['b'].info.description = 't1_b'\n\n # Key col 'b', take first non-empty value '%6s'\n t4['b'].info.format = '%6s'\n\n # Key col 'a', should be merged meta\n t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))\n t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))\n t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))\n\n # Key col 'b', should be meta2\n t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))\n\n if operation_table_type is Table:\n ctx = pytest.warns(metadata.MergeConflictWarning)\n else:\n ctx = nullcontext()\n\n with ctx as warning_lines:\n out = table.vstack([t1, t2, t4], join_type='inner')\n\n if operation_table_type is Table:\n assert len(warning_lines) == 2\n assert (\"In merged column 'a' the 'unit' attribute does not match (cm != m)\"\n in str(warning_lines[0].message))\n assert (\"In merged column 'a' the 'unit' attribute does not match (m != km)\"\n in str(warning_lines[1].message))\n # Check units are suitably ignored for a regular Table\n assert out.pformat() == [' a b ',\n ' km ',\n '-------- ------',\n '0.000000 foo',\n '1.000000 bar',\n '2.000000 pez',\n '3.000000 sez',\n '0.000000 foo',\n '1.000000 bar']\n else:\n # Check QTable correctly dealt with units.\n assert out.pformat() == [' a b ',\n ' km ',\n '-------- ------',\n '0.000000 foo',\n '0.000010 bar',\n '0.002000 pez',\n '0.003000 sez',\n '0.000000 foo',\n '1.000000 bar']\n assert out['a'].info.unit == 'km'\n assert out['a'].info.format == '%f'\n assert out['b'].info.description == 't1_b'\n assert out['b'].info.format == '%6s'\n assert out['a'].info.meta == self.meta_merge\n assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])\n\n def test_col_meta_merge_outer(self, operation_table_type):\n if operation_table_type is QTable:\n pytest.xfail('Quantity columns do not support masking.')\n 
self._setup(operation_table_type)\n t1 = self.t1\n t2 = self.t2\n t4 = self.t4\n\n # Key col 'a', should last value ('km')\n t1['a'].unit = 'cm'\n t2['a'].unit = 'm'\n t4['a'].unit = 'km'\n\n # Key col 'a' format should take last when all match\n t1['a'].info.format = '%0d'\n t2['a'].info.format = '%0d'\n t4['a'].info.format = '%0d'\n\n # Key col 'b', take first value 't1_b'\n t1['b'].info.description = 't1_b'\n\n # Key col 'b', take first non-empty value '%6s'\n t4['b'].info.format = '%6s'\n\n # Key col 'a', should be merged meta\n t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))\n t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))\n t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))\n\n # Key col 'b', should be meta2\n t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))\n\n # All these should pass through\n t2['c'].unit = 'm'\n t2['c'].info.format = '%6s'\n t2['c'].info.description = 't2_c'\n\n with pytest.warns(metadata.MergeConflictWarning) as warning_lines:\n out = table.vstack([t1, t2, t4], join_type='outer')\n\n assert len(warning_lines) == 2\n assert (\"In merged column 'a' the 'unit' attribute does not match (cm != m)\"\n in str(warning_lines[0].message))\n assert (\"In merged column 'a' the 'unit' attribute does not match (m != km)\"\n in str(warning_lines[1].message))\n assert out['a'].unit == 'km'\n assert out['a'].info.format == '%0d'\n assert out['b'].info.description == 't1_b'\n assert out['b'].info.format == '%6s'\n assert out['a'].info.meta == self.meta_merge\n assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])\n assert out['c'].info.unit == 'm'\n assert out['c'].info.format == '%6s'\n assert out['c'].info.description == 't2_c'\n\n def test_vstack_one_table(self, operation_table_type):\n self._setup(operation_table_type)\n \"\"\"Regression test for issue #3313\"\"\"\n assert (self.t1 == 
table.vstack(self.t1)).all()\n assert (self.t1 == table.vstack([self.t1])).all()\n\n def test_mixin_functionality(self, mixin_cols):\n col = mixin_cols['m']\n len_col = len(col)\n t = table.QTable([col], names=['a'])\n cls_name = type(col).__name__\n\n # Vstack works for these classes:\n if isinstance(col, (u.Quantity, Time, TimeDelta, SkyCoord, EarthLocation,\n BaseRepresentationOrDifferential)):\n out = table.vstack([t, t])\n assert len(out) == len_col * 2\n if cls_name == 'SkyCoord':\n # Argh, SkyCoord needs __eq__!!\n assert skycoord_equal(out['a'][len_col:], col)\n assert skycoord_equal(out['a'][:len_col], col)\n elif 'Repr' in cls_name or 'Diff' in cls_name:\n assert np.all(representation_equal(out['a'][:len_col], col))\n assert np.all(representation_equal(out['a'][len_col:], col))\n else:\n assert np.all(out['a'][:len_col] == col)\n assert np.all(out['a'][len_col:] == col)\n else:\n with pytest.raises(NotImplementedError) as err:\n table.vstack([t, t])\n assert ('vstack unavailable for mixin column type(s): {}'\n .format(cls_name) in str(err.value))\n\n # Check for outer stack which requires masking. 
Only Time supports\n # this currently.\n t2 = table.QTable([col], names=['b']) # different from col name for t\n if isinstance(col, (Time, TimeDelta, Quantity)):\n out = table.vstack([t, t2], join_type='outer')\n assert len(out) == len_col * 2\n assert np.all(out['a'][:len_col] == col)\n assert np.all(out['b'][len_col:] == col)\n assert check_mask(out['a'], [False] * len_col + [True] * len_col)\n assert check_mask(out['b'], [True] * len_col + [False] * len_col)\n # check directly stacking mixin columns:\n out2 = table.vstack([t, t2['b']])\n assert np.all(out['a'] == out2['a'])\n assert np.all(out['b'] == out2['b'])\n else:\n with pytest.raises(NotImplementedError) as err:\n table.vstack([t, t2], join_type='outer')\n assert ('vstack requires masking' in str(err.value)\n or 'vstack unavailable' in str(err.value))\n\n def test_vstack_different_representation(self):\n \"\"\"Test that representations can be mixed together.\"\"\"\n rep1 = CartesianRepresentation([1, 2]*u.km, [3, 4]*u.km, 1*u.km)\n rep2 = SphericalRepresentation([0]*u.deg, [0]*u.deg, 10*u.km)\n t1 = Table([rep1])\n t2 = Table([rep2])\n t12 = table.vstack([t1, t2])\n expected = CartesianRepresentation([1, 2, 10]*u.km,\n [3, 4, 0]*u.km,\n [1, 1, 0]*u.km)\n assert np.all(representation_equal(t12['col0'], expected))\n\n rep3 = UnitSphericalRepresentation([0]*u.deg, [0]*u.deg)\n t3 = Table([rep3])\n with pytest.raises(ValueError, match='representations are inconsistent'):\n table.vstack([t1, t3])\n\n def test_vstack_structured_column(self):\n \"\"\"Regression tests for gh-13271.\"\"\"\n # Two tables with matching names, including a structured column.\n t1 = Table([np.array([(1., 1), (2., 2)], dtype=[('f', 'f8'), ('i', 'i8')]),\n ['one', 'two']], names=['structured', 'string'])\n t2 = Table([np.array([(3., 3), (4., 4)], dtype=[('f', 'f8'), ('i', 'i8')]),\n ['three', 'four']], names=['structured', 'string'])\n t12 = table.vstack([t1, t2])\n assert t12.pformat() == [\n 'structured [f, i] string',\n 
'----------------- ------',\n ' (1., 1) one',\n ' (2., 2) two',\n ' (3., 3) three',\n ' (4., 4) four']\n\n # One table without the structured column.\n t3 = t2[('string',)]\n t13 = table.vstack([t1, t3])\n assert t13.pformat() == [\n 'structured [f, i] string',\n '----------------- ------',\n ' (1.0, 1) one',\n ' (2.0, 2) two',\n ' -- three',\n ' -- four']\n\n\nclass TestDStack():\n\n def _setup(self, t_cls=Table):\n self.t1 = t_cls.read([' a b',\n ' 0. foo',\n ' 1. bar'], format='ascii')\n\n self.t2 = t_cls.read([' a b c',\n ' 2. pez 4',\n ' 3. sez 5'], format='ascii')\n self.t2['d'] = Time([1, 2], format='cxcsec')\n\n self.t3 = t_cls({'a': [[5., 6.], [4., 3.]],\n 'b': [['foo', 'bar'], ['pez', 'sez']]},\n names=('a', 'b'))\n\n self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)\n\n self.t5 = t_cls({'a': [[4., 2.], [1., 6.]],\n 'b': [['foo', 'pez'], ['bar', 'sez']]},\n names=('a', 'b'))\n self.t6 = t_cls.read([' a b c',\n ' 7. pez 2',\n ' 4. sez 6',\n ' 6. foo 3'], format='ascii')\n\n def test_validate_join_type(self):\n self._setup()\n with pytest.raises(TypeError, match='Did you accidentally call dstack'):\n table.dstack(self.t1, self.t2)\n\n @staticmethod\n def compare_dstack(tables, out):\n for ii, tbl in enumerate(tables):\n for name, out_col in out.columns.items():\n if name in tbl.colnames:\n # Columns always compare equal\n assert np.all(tbl[name] == out[name][:, ii])\n\n # If input has a mask then output must have same mask\n if hasattr(tbl[name], 'mask'):\n assert np.all(tbl[name].mask == out[name].mask[:, ii])\n\n # If input has no mask then output might have a mask (if other table\n # is missing that column). 
If so then all mask values should be False.\n elif hasattr(out[name], 'mask'):\n assert not np.any(out[name].mask[:, ii])\n\n else:\n # Column missing for this table, out must have a mask with all True.\n assert np.all(out[name].mask[:, ii])\n\n def test_dstack_table_column(self, operation_table_type):\n \"\"\"Stack a table with 3 cols and one column (gets auto-converted to Table).\n \"\"\"\n self._setup(operation_table_type)\n t2 = self.t1.copy()\n out = table.dstack([self.t1, t2['a']])\n self.compare_dstack([self.t1, t2[('a',)]], out)\n\n def test_dstack_basic_outer(self, operation_table_type):\n if operation_table_type is QTable:\n pytest.xfail('Quantity columns do not support masking.')\n self._setup(operation_table_type)\n t1 = self.t1\n t2 = self.t2\n t4 = self.t4\n t4['a'].mask[0] = True\n # Test for non-masked table\n t12 = table.dstack([t1, t2], join_type='outer')\n assert type(t12) is operation_table_type\n assert type(t12['a']) is type(t1['a']) # noqa\n assert type(t12['b']) is type(t1['b']) # noqa\n self.compare_dstack([t1, t2], t12)\n\n # Test for masked table\n t124 = table.dstack([t1, t2, t4], join_type='outer')\n assert type(t124) is operation_table_type\n assert type(t124['a']) is type(t4['a']) # noqa\n assert type(t124['b']) is type(t4['b']) # noqa\n self.compare_dstack([t1, t2, t4], t124)\n\n def test_dstack_basic_inner(self, operation_table_type):\n self._setup(operation_table_type)\n t1 = self.t1\n t2 = self.t2\n t4 = self.t4\n\n # Test for masked table\n t124 = table.dstack([t1, t2, t4], join_type='inner')\n assert type(t124) is operation_table_type\n assert type(t124['a']) is type(t4['a']) # noqa\n assert type(t124['b']) is type(t4['b']) # noqa\n self.compare_dstack([t1, t2, t4], t124)\n\n def test_dstack_multi_dimension_column(self, operation_table_type):\n self._setup(operation_table_type)\n t3 = self.t3\n t5 = self.t5\n t2 = self.t2\n t35 = table.dstack([t3, t5])\n assert type(t35) is operation_table_type\n assert type(t35['a']) is 
type(t3['a']) # noqa\n assert type(t35['b']) is type(t3['b']) # noqa\n self.compare_dstack([t3, t5], t35)\n\n with pytest.raises(TableMergeError):\n table.dstack([t2, t3])\n\n def test_dstack_different_length_table(self, operation_table_type):\n self._setup(operation_table_type)\n t2 = self.t2\n t6 = self.t6\n with pytest.raises(ValueError):\n table.dstack([t2, t6])\n\n def test_dstack_single_table(self):\n self._setup(Table)\n out = table.dstack(self.t1)\n assert np.all(out == self.t1)\n\n def test_dstack_representation(self):\n rep1 = SphericalRepresentation([1, 2]*u.deg, [3, 4]*u.deg, 1*u.kpc)\n rep2 = SphericalRepresentation([10, 20]*u.deg, [30, 40]*u.deg, 10*u.kpc)\n t1 = Table([rep1])\n t2 = Table([rep2])\n t12 = table.dstack([t1, t2])\n assert np.all(representation_equal(t12['col0'][:, 0], rep1))\n assert np.all(representation_equal(t12['col0'][:, 1], rep2))\n\n def test_dstack_skycoord(self):\n sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)\n sc2 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg)\n t1 = Table([sc1])\n t2 = Table([sc2])\n t12 = table.dstack([t1, t2])\n assert skycoord_equal(sc1, t12['col0'][:, 0])\n assert skycoord_equal(sc2, t12['col0'][:, 1])\n\n def test_dstack_structured_column(self):\n \"\"\"Regression tests for gh-13271.\"\"\"\n # Two tables with matching names, including a structured column.\n t1 = Table([np.array([(1., 1), (2., 2)], dtype=[('f', 'f8'), ('i', 'i8')]),\n ['one', 'two']], names=['structured', 'string'])\n t2 = Table([np.array([(3., 3), (4., 4)], dtype=[('f', 'f8'), ('i', 'i8')]),\n ['three', 'four']], names=['structured', 'string'])\n t12 = table.dstack([t1, t2])\n assert t12.pformat() == [\n 'structured [f, i] string ',\n '------------------ ------------',\n '(1., 1) .. (3., 3) one .. three',\n '(2., 2) .. (4., 4) two .. 
four']\n\n # One table without the structured column.\n t3 = t2[('string',)]\n t13 = table.dstack([t1, t3])\n assert t13.pformat() == [\n 'structured [f, i] string ',\n '----------------- ------------',\n ' (1.0, 1) .. -- one .. three',\n ' (2.0, 2) .. -- two .. four']\n\n\nclass TestHStack():\n\n def _setup(self, t_cls=Table):\n self.t1 = t_cls.read([' a b',\n ' 0. foo',\n ' 1. bar'], format='ascii')\n\n self.t2 = t_cls.read([' a b c',\n ' 2. pez 4',\n ' 3. sez 5'], format='ascii')\n\n self.t3 = t_cls.read([' d e',\n ' 4. 7',\n ' 5. 8',\n ' 6. 9'], format='ascii')\n self.t4 = t_cls(self.t1, copy=True, masked=True)\n self.t4['a'].name = 'f'\n self.t4['b'].name = 'g'\n\n # The following table has meta-data that conflicts with t1\n self.t5 = t_cls(self.t1, copy=True)\n\n self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))\n self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))\n self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))\n self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)]))\n self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]),\n ('c', {'a': 1, 'b': 1, 'c': 1}),\n ('d', 1),\n ('a', 1),\n ('e', 1)])\n\n def test_validate_join_type(self):\n self._setup()\n with pytest.raises(TypeError, match='Did you accidentally call hstack'):\n table.hstack(self.t1, self.t2)\n\n def test_stack_same_table(self, operation_table_type):\n \"\"\"\n From #2995, test that hstack'ing references to the same table has the\n expected output.\n \"\"\"\n self._setup(operation_table_type)\n out = table.hstack([self.t1, self.t1])\n assert out.masked is False\n assert out.pformat() == ['a_1 b_1 a_2 b_2',\n '--- --- --- ---',\n '0.0 foo 0.0 foo',\n '1.0 bar 1.0 bar']\n\n def test_stack_rows(self, operation_table_type):\n self._setup(operation_table_type)\n out = table.hstack([self.t1[0], self.t2[1]])\n assert out.masked is False\n assert out.pformat() == ['a_1 b_1 a_2 b_2 c ',\n '--- --- --- --- 
---',\n '0.0 foo 3.0 sez 5']\n\n def test_stack_columns(self, operation_table_type):\n self._setup(operation_table_type)\n out = table.hstack([self.t1, self.t2['c']])\n assert type(out['a']) is type(self.t1['a']) # noqa\n assert type(out['b']) is type(self.t1['b']) # noqa\n assert type(out['c']) is type(self.t2['c']) # noqa\n assert out.pformat() == [' a b c ',\n '--- --- ---',\n '0.0 foo 4',\n '1.0 bar 5']\n\n def test_table_meta_merge(self, operation_table_type):\n self._setup(operation_table_type)\n out = table.hstack([self.t1, self.t2, self.t4], join_type='inner')\n assert out.meta == self.meta_merge\n\n def test_table_meta_merge_conflict(self, operation_table_type):\n self._setup(operation_table_type)\n\n with pytest.warns(metadata.MergeConflictWarning) as w:\n out = table.hstack([self.t1, self.t5], join_type='inner')\n assert len(w) == 2\n\n assert out.meta == self.t5.meta\n\n with pytest.warns(metadata.MergeConflictWarning) as w:\n out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn')\n assert len(w) == 2\n\n assert out.meta == self.t5.meta\n\n out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent')\n\n assert out.meta == self.t5.meta\n\n with pytest.raises(MergeConflictError):\n out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error')\n\n with pytest.raises(ValueError):\n out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense')\n\n def test_bad_input_type(self, operation_table_type):\n self._setup(operation_table_type)\n with pytest.raises(ValueError):\n table.hstack([])\n with pytest.raises(TypeError):\n table.hstack(1)\n with pytest.raises(TypeError):\n table.hstack([self.t2, 1])\n with pytest.raises(ValueError):\n table.hstack([self.t1, self.t2], join_type='invalid join type')\n\n def test_stack_basic(self, operation_table_type):\n self._setup(operation_table_type)\n t1 = self.t1\n t2 = self.t2\n t3 = self.t3\n t4 = self.t4\n\n out = 
table.hstack([t1, t2], join_type='inner')\n assert out.masked is False\n assert type(out) is operation_table_type\n assert type(out['a_1']) is type(t1['a']) # noqa\n assert type(out['b_1']) is type(t1['b']) # noqa\n assert type(out['a_2']) is type(t2['a']) # noqa\n assert type(out['b_2']) is type(t2['b']) # noqa\n assert out.pformat() == ['a_1 b_1 a_2 b_2 c ',\n '--- --- --- --- ---',\n '0.0 foo 2.0 pez 4',\n '1.0 bar 3.0 sez 5']\n\n # stacking as a list gives same result\n out_list = table.hstack([t1, t2], join_type='inner')\n assert out.pformat() == out_list.pformat()\n\n out = table.hstack([t1, t2], join_type='outer')\n assert out.pformat() == out_list.pformat()\n\n out = table.hstack([t1, t2, t3, t4], join_type='outer')\n assert out.masked is False\n assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',\n '--- --- --- --- --- --- --- --- ---',\n '0.0 foo 2.0 pez 4 4.0 7 0.0 foo',\n '1.0 bar 3.0 sez 5 5.0 8 1.0 bar',\n ' -- -- -- -- -- 6.0 9 -- --']\n\n out = table.hstack([t1, t2, t3, t4], join_type='inner')\n assert out.masked is False\n assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',\n '--- --- --- --- --- --- --- --- ---',\n '0.0 foo 2.0 pez 4 4.0 7 0.0 foo',\n '1.0 bar 3.0 sez 5 5.0 8 1.0 bar']\n\n def test_stack_incompatible(self, operation_table_type):\n self._setup(operation_table_type)\n # For join_type exact, which will fail here because n_rows\n # does not match\n with pytest.raises(TableMergeError):\n table.hstack([self.t1, self.t3], join_type='exact')\n\n def test_hstack_one_masked(self, operation_table_type):\n if operation_table_type is QTable:\n pytest.xfail()\n self._setup(operation_table_type)\n t1 = self.t1\n t2 = operation_table_type(t1, copy=True, masked=True)\n t2.meta.clear()\n t2['b'].mask[1] = True\n out = table.hstack([t1, t2])\n assert out.pformat() == ['a_1 b_1 a_2 b_2',\n '--- --- --- ---',\n '0.0 foo 0.0 foo',\n '1.0 bar 1.0 --']\n\n def test_table_col_rename(self, operation_table_type):\n self._setup(operation_table_type)\n 
out = table.hstack([self.t1, self.t2], join_type='inner',\n uniq_col_name='{table_name}_{col_name}',\n table_names=('left', 'right'))\n assert out.masked is False\n assert out.pformat() == ['left_a left_b right_a right_b c ',\n '------ ------ ------- ------- ---',\n ' 0.0 foo 2.0 pez 4',\n ' 1.0 bar 3.0 sez 5']\n\n def test_col_meta_merge(self, operation_table_type):\n self._setup(operation_table_type)\n t1 = self.t1\n t3 = self.t3[:2]\n t4 = self.t4\n\n # Just set a bunch of meta and make sure it is the same in output\n meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])\n t1['a'].unit = 'cm'\n t1['b'].info.description = 't1_b'\n t4['f'].info.format = '%6s'\n t1['b'].info.meta.update(meta1)\n t3['d'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))\n t4['g'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))\n t3['e'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))\n t3['d'].unit = 'm'\n t3['d'].info.format = '%6s'\n t3['d'].info.description = 't3_c'\n\n out = table.hstack([t1, t3, t4], join_type='exact')\n\n for t in [t1, t3, t4]:\n for name in t.colnames:\n for attr in ('meta', 'unit', 'format', 'description'):\n assert getattr(out[name].info, attr) == getattr(t[name].info, attr)\n\n # Make sure we got a copy of meta, not ref\n t1['b'].info.meta['b'] = None\n assert out['b'].info.meta['b'] == [1, 2]\n\n def test_hstack_one_table(self, operation_table_type):\n self._setup(operation_table_type)\n \"\"\"Regression test for issue #3313\"\"\"\n assert (self.t1 == table.hstack(self.t1)).all()\n assert (self.t1 == table.hstack([self.t1])).all()\n\n def test_mixin_functionality(self, mixin_cols):\n col1 = mixin_cols['m']\n col2 = col1[2:4] # Shorter version of col1\n t1 = table.QTable([col1])\n t2 = table.QTable([col2])\n\n cls_name = type(col1).__name__\n\n out = table.hstack([t1, t2], join_type='inner')\n assert type(out['col0_1']) is type(out['col0_2']) # noqa\n assert len(out) == 
len(col2)\n\n # Check that columns are as expected.\n if cls_name == 'SkyCoord':\n assert skycoord_equal(out['col0_1'], col1[:len(col2)])\n assert skycoord_equal(out['col0_2'], col2)\n elif 'Repr' in cls_name or 'Diff' in cls_name:\n assert np.all(representation_equal(out['col0_1'], col1[:len(col2)]))\n assert np.all(representation_equal(out['col0_2'], col2))\n else:\n assert np.all(out['col0_1'] == col1[:len(col2)])\n assert np.all(out['col0_2'] == col2)\n\n # Time class supports masking, all other mixins do not\n if isinstance(col1, (Time, TimeDelta, Quantity)):\n out = table.hstack([t1, t2], join_type='outer')\n assert len(out) == len(t1)\n assert np.all(out['col0_1'] == col1)\n assert np.all(out['col0_2'][:len(col2)] == col2)\n assert check_mask(out['col0_2'], [False, False, True, True])\n\n # check directly stacking mixin columns:\n out2 = table.hstack([t1, t2['col0']], join_type='outer')\n assert np.all(out['col0_1'] == out2['col0_1'])\n assert np.all(out['col0_2'] == out2['col0_2'])\n else:\n with pytest.raises(NotImplementedError) as err:\n table.hstack([t1, t2], join_type='outer')\n assert 'hstack requires masking' in str(err.value)\n\n\ndef test_unique(operation_table_type):\n t = operation_table_type.read(\n [' a b c d',\n ' 2 b 7.0 0',\n ' 1 c 3.0 5',\n ' 2 b 6.0 2',\n ' 2 a 4.0 3',\n ' 1 a 1.0 7',\n ' 2 b 5.0 1',\n ' 0 a 0.0 4',\n ' 1 a 2.0 6',\n ' 1 c 3.0 5',\n ], format='ascii')\n\n tu = operation_table_type(np.sort(t[:-1]))\n\n t_all = table.unique(t)\n assert sort_eq(t_all.pformat(), tu.pformat())\n t_s = t.copy()\n del t_s['b', 'c', 'd']\n t_all = table.unique(t_s)\n assert sort_eq(t_all.pformat(), [' a ',\n '---',\n ' 0',\n ' 1',\n ' 2'])\n\n key1 = 'a'\n t1a = table.unique(t, key1)\n assert sort_eq(t1a.pformat(), [' a b c d ',\n '--- --- --- ---',\n ' 0 a 0.0 4',\n ' 1 c 3.0 5',\n ' 2 b 7.0 0'])\n t1b = table.unique(t, key1, keep='last')\n assert sort_eq(t1b.pformat(), [' a b c d ',\n '--- --- --- ---',\n ' 0 a 0.0 4',\n ' 1 c 3.0 5',\n ' 2 b 
5.0 1'])\n t1c = table.unique(t, key1, keep='none')\n assert sort_eq(t1c.pformat(), [' a b c d ',\n '--- --- --- ---',\n ' 0 a 0.0 4'])\n\n key2 = ['a', 'b']\n t2a = table.unique(t, key2)\n assert sort_eq(t2a.pformat(), [' a b c d ',\n '--- --- --- ---',\n ' 0 a 0.0 4',\n ' 1 a 1.0 7',\n ' 1 c 3.0 5',\n ' 2 a 4.0 3',\n ' 2 b 7.0 0'])\n\n t2b = table.unique(t, key2, keep='last')\n assert sort_eq(t2b.pformat(), [' a b c d ',\n '--- --- --- ---',\n ' 0 a 0.0 4',\n ' 1 a 2.0 6',\n ' 1 c 3.0 5',\n ' 2 a 4.0 3',\n ' 2 b 5.0 1'])\n t2c = table.unique(t, key2, keep='none')\n assert sort_eq(t2c.pformat(), [' a b c d ',\n '--- --- --- ---',\n ' 0 a 0.0 4',\n ' 2 a 4.0 3'])\n\n key2 = ['a', 'a']\n with pytest.raises(ValueError) as exc:\n t2a = table.unique(t, key2)\n assert exc.value.args[0] == \"duplicate key names\"\n\n with pytest.raises(ValueError) as exc:\n table.unique(t, key2, keep=True)\n assert exc.value.args[0] == (\n \"'keep' should be one of 'first', 'last', 'none'\")\n\n t1_m = operation_table_type(t1a, masked=True)\n t1_m['a'].mask[1] = True\n\n with pytest.raises(ValueError) as exc:\n t1_mu = table.unique(t1_m)\n assert exc.value.args[0] == (\n \"cannot use columns with masked values as keys; \"\n \"remove column 'a' from keys and rerun unique()\")\n\n t1_mu = table.unique(t1_m, silent=True)\n assert t1_mu.masked is False\n assert t1_mu.pformat() == [' a b c d ',\n '--- --- --- ---',\n ' 0 a 0.0 4',\n ' 2 b 7.0 0',\n ' -- c 3.0 5']\n\n with pytest.raises(ValueError):\n t1_mu = table.unique(t1_m, silent=True, keys='a')\n\n t1_m = operation_table_type(t, masked=True)\n t1_m['a'].mask[1] = True\n t1_m['d'].mask[3] = True\n\n # Test that multiple masked key columns get removed in the correct\n # order\n t1_mu = table.unique(t1_m, keys=['d', 'a', 'b'], silent=True)\n assert t1_mu.masked is False\n assert t1_mu.pformat() == [' a b c d ',\n '--- --- --- ---',\n ' 2 a 4.0 --',\n ' 2 b 7.0 0',\n ' -- c 3.0 5']\n\n\ndef test_vstack_bytes(operation_table_type):\n \"\"\"\n 
Test for issue #5617 when vstack'ing bytes columns in Py3.\n This is really an upstream numpy issue numpy/numpy/#8403.\n \"\"\"\n t = operation_table_type([[b'a']], names=['a'])\n assert t['a'].itemsize == 1\n\n t2 = table.vstack([t, t])\n assert len(t2) == 2\n assert t2['a'].itemsize == 1\n\n\ndef test_vstack_unicode():\n \"\"\"\n Test for problem related to issue #5617 when vstack'ing *unicode*\n columns. In this case the character size gets multiplied by 4.\n \"\"\"\n t = table.Table([['a']], names=['a'])\n assert t['a'].itemsize == 4 # 4-byte / char for U dtype\n\n t2 = table.vstack([t, t])\n assert len(t2) == 2\n assert t2['a'].itemsize == 4\n\n\ndef test_join_mixins_time_quantity():\n \"\"\"\n Test for table join using non-ndarray key columns.\n \"\"\"\n tm1 = Time([2, 1, 2], format='cxcsec')\n q1 = [2, 1, 1] * u.m\n idx1 = [1, 2, 3]\n tm2 = Time([2, 3], format='cxcsec')\n q2 = [2, 3] * u.m\n idx2 = [10, 20]\n t1 = Table([tm1, q1, idx1], names=['tm', 'q', 'idx'])\n t2 = Table([tm2, q2, idx2], names=['tm', 'q', 'idx'])\n # Output:\n #\n #
\n # tm q idx_1 idx_2\n # m\n # object float64 int64 int64\n # ------------------ ------- ----- -----\n # 0.9999999999969589 1.0 2 --\n # 2.00000000000351 1.0 3 --\n # 2.00000000000351 2.0 1 10\n # 3.000000000000469 3.0 -- 20\n\n t12 = table.join(t1, t2, join_type='outer', keys=['tm', 'q'])\n # Key cols are lexically sorted\n assert np.all(t12['tm'] == Time([1, 2, 2, 3], format='cxcsec'))\n assert np.all(t12['q'] == [1, 1, 2, 3] * u.m)\n assert np.all(t12['idx_1'] == np.ma.array([2, 3, 1, 0], mask=[0, 0, 0, 1]))\n assert np.all(t12['idx_2'] == np.ma.array([0, 0, 10, 20], mask=[1, 1, 0, 0]))\n\n\ndef test_join_mixins_not_sortable():\n \"\"\"\n Test for table join using non-ndarray key columns that are not sortable.\n \"\"\"\n sc = SkyCoord([1, 2], [3, 4], unit='deg,deg')\n t1 = Table([sc, [1, 2]], names=['sc', 'idx1'])\n t2 = Table([sc, [10, 20]], names=['sc', 'idx2'])\n\n with pytest.raises(TypeError, match='one or more key columns are not sortable'):\n table.join(t1, t2, keys='sc')\n\n\ndef test_join_non_1d_key_column():\n c1 = [[1, 2], [3, 4]]\n c2 = [1, 2]\n t1 = Table([c1, c2], names=['a', 'b'])\n t2 = t1.copy()\n with pytest.raises(ValueError, match=\"key column 'a' must be 1-d\"):\n table.join(t1, t2, keys='a')\n\n\ndef test_argsort_time_column():\n \"\"\"Regression test for #10823.\"\"\"\n times = Time(['2016-01-01', '2018-01-01', '2017-01-01'])\n t = Table([times], names=['time'])\n i = t.argsort('time')\n assert np.all(i == times.argsort())\n\n\ndef test_sort_indexed_table():\n \"\"\"Test fix for #9473 and #6545 - and another regression test for #10823.\"\"\"\n t = Table([[1, 3, 2], [6, 4, 5]], names=('a', 'b'))\n t.add_index('a')\n t.sort('a')\n assert np.all(t['a'] == [1, 2, 3])\n assert np.all(t['b'] == [6, 5, 4])\n t.sort('b')\n assert np.all(t['b'] == [4, 5, 6])\n assert np.all(t['a'] == [3, 2, 1])\n\n times = ['2016-01-01', '2018-01-01', '2017-01-01']\n tm = Time(times)\n t2 = Table([tm, [3, 2, 1]], names=['time', 'flux'])\n t2.sort('flux')\n assert 
np.all(t2['flux'] == [1, 2, 3])\n t2.sort('time')\n assert np.all(t2['flux'] == [3, 1, 2])\n assert np.all(t2['time'] == tm[[0, 2, 1]])\n\n # Using the table as a TimeSeries implicitly sets the index, so\n # this test is a bit different from the above.\n from astropy.timeseries import TimeSeries\n ts = TimeSeries(time=times)\n ts['flux'] = [3, 2, 1]\n ts.sort('flux')\n assert np.all(ts['flux'] == [1, 2, 3])\n ts.sort('time')\n assert np.all(ts['flux'] == [3, 1, 2])\n assert np.all(ts['time'] == tm[[0, 2, 1]])\n\n\ndef test_get_out_class():\n c = table.Column([1, 2])\n mc = table.MaskedColumn([1, 2])\n q = [1, 2] * u.m\n\n assert _get_out_class([c, mc]) is mc.__class__\n assert _get_out_class([mc, c]) is mc.__class__\n assert _get_out_class([c, c]) is c.__class__\n assert _get_out_class([c]) is c.__class__\n\n with pytest.raises(ValueError):\n _get_out_class([c, q])\n\n with pytest.raises(ValueError):\n _get_out_class([q, c])\n\n\ndef test_masking_required_exception():\n \"\"\"\n Test that outer join, hstack and vstack fail for a mixin column which\n does not support masking.\n \"\"\"\n col = table.NdarrayMixin([0, 1, 2, 3])\n t1 = table.QTable([[1, 2, 3, 4], col], names=['a', 'b'])\n t2 = table.QTable([[1, 2], col[:2]], names=['a', 'c'])\n\n with pytest.raises(NotImplementedError) as err:\n table.vstack([t1, t2], join_type='outer')\n assert 'vstack unavailable' in str(err.value)\n\n with pytest.raises(NotImplementedError) as err:\n table.hstack([t1, t2], join_type='outer')\n assert 'hstack requires masking' in str(err.value)\n\n with pytest.raises(NotImplementedError) as err:\n table.join(t1, t2, join_type='outer')\n assert 'join requires masking' in str(err.value)\n\n\ndef test_stack_columns():\n c = table.Column([1, 2])\n mc = table.MaskedColumn([1, 2])\n q = [1, 2] * u.m\n time = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])\n sc = SkyCoord([1, 2], [3, 4], unit='deg')\n cq = table.Column([11, 22], unit=u.m)\n\n t = table.hstack([c, q])\n assert 
t.__class__ is table.QTable\n assert t.masked is False\n t = table.hstack([q, c])\n assert t.__class__ is table.QTable\n assert t.masked is False\n\n t = table.hstack([mc, q])\n assert t.__class__ is table.QTable\n assert t.masked is False\n\n t = table.hstack([c, mc])\n assert t.__class__ is table.Table\n assert t.masked is False\n\n t = table.vstack([q, q])\n assert t.__class__ is table.QTable\n\n t = table.vstack([c, c])\n assert t.__class__ is table.Table\n\n t = table.hstack([c, time])\n assert t.__class__ is table.Table\n t = table.hstack([c, sc])\n assert t.__class__ is table.Table\n t = table.hstack([q, time, sc])\n assert t.__class__ is table.QTable\n\n with pytest.raises(ValueError):\n table.vstack([c, q])\n\n with pytest.raises(ValueError):\n t = table.vstack([q, cq])\n\n\ndef test_mixin_join_regression():\n # This used to trigger a ValueError:\n # ValueError: NumPy boolean array indexing assignment cannot assign\n # 6 input values to the 4 output values where the mask is true\n\n t1 = QTable()\n t1['index'] = [1, 2, 3, 4, 5]\n t1['flux1'] = [2, 3, 2, 1, 1] * u.Jy\n t1['flux2'] = [2, 3, 2, 1, 1] * u.Jy\n\n t2 = QTable()\n t2['index'] = [3, 4, 5, 6]\n t2['flux1'] = [2, 1, 1, 3] * u.Jy\n t2['flux2'] = [2, 1, 1, 3] * u.Jy\n\n t12 = table.join(t1, t2, keys=('index', 'flux1', 'flux2'), join_type='outer')\n\n assert len(t12) == 6\n"}}},{"rowIdx":1353,"cells":{"hash":{"kind":"string","value":"eb006a080678cd52a97c6455bd90f2a537a1e7ae8d7ceadfe9d0f3d05718e00b"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom astropy.utils.tests.test_metadata import MetaBaseTest\nimport operator\nimport warnings\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom astropy.tests.helper import assert_follows_unicode_guidelines\nfrom astropy import table\nfrom astropy import time\nfrom astropy import units as u\n\n\nclass TestColumn():\n\n def test_subclass(self, Column):\n c = 
Column(name='a')\n assert isinstance(c, np.ndarray)\n c2 = c * 2\n assert isinstance(c2, Column)\n assert isinstance(c2, np.ndarray)\n\n def test_numpy_ops(self, Column):\n \"\"\"Show that basic numpy operations with Column behave sensibly\"\"\"\n\n arr = np.array([1, 2, 3])\n c = Column(arr, name='a')\n\n for op, test_equal in ((operator.eq, True),\n (operator.ne, False),\n (operator.ge, True),\n (operator.gt, False),\n (operator.le, True),\n (operator.lt, False)):\n for eq in (op(c, arr), op(arr, c)):\n\n assert np.all(eq) if test_equal else not np.any(eq)\n assert len(eq) == 3\n if Column is table.Column:\n assert type(eq) == np.ndarray\n else:\n assert type(eq) == np.ma.core.MaskedArray\n assert eq.dtype.str == '|b1'\n\n lt = c - 1 < arr\n assert np.all(lt)\n\n def test_numpy_boolean_ufuncs(self, Column):\n \"\"\"Show that basic numpy operations with Column behave sensibly\"\"\"\n\n arr = np.array([1, 2, 3])\n c = Column(arr, name='a')\n\n for ufunc, test_true in ((np.isfinite, True),\n (np.isinf, False),\n (np.isnan, False),\n (np.sign, True),\n (np.signbit, False)):\n result = ufunc(c)\n assert len(result) == len(c)\n assert np.all(result) if test_true else not np.any(result)\n if Column is table.Column:\n assert type(result) == np.ndarray\n else:\n assert type(result) == np.ma.core.MaskedArray\n if ufunc is not np.sign:\n assert result.dtype.str == '|b1'\n\n def test_view(self, Column):\n c = np.array([1, 2, 3], dtype=np.int64).view(Column)\n assert repr(c) == f\"<{Column.__name__} dtype='int64' length=3>\\n1\\n2\\n3\"\n\n def test_format(self, Column):\n \"\"\"Show that the formatted output from str() works\"\"\"\n from astropy import conf\n with conf.set_temp('max_lines', 8):\n c1 = Column(np.arange(2000), name='a', dtype=float,\n format='%6.2f')\n assert str(c1).splitlines() == [' a ',\n '-------',\n ' 0.00',\n ' 1.00',\n ' ...',\n '1998.00',\n '1999.00',\n 'Length = 2000 rows']\n\n def test_convert_numpy_array(self, Column):\n d = Column([1, 2, 3], 
name='a', dtype='i8')\n\n np_data = np.array(d)\n assert np.all(np_data == d)\n np_data = np.array(d, copy=False)\n assert np.all(np_data == d)\n np_data = np.array(d, dtype='i4')\n assert np.all(np_data == d)\n\n def test_convert_unit(self, Column):\n d = Column([1, 2, 3], name='a', dtype=\"f8\", unit=\"m\")\n d.convert_unit_to(\"km\")\n assert np.all(d.data == [0.001, 0.002, 0.003])\n\n def test_array_wrap(self):\n \"\"\"Test that the __array_wrap__ method converts a reduction ufunc\n output that has a different shape into an ndarray view. Without this a\n method call like c.mean() returns a Column array object with length=1.\"\"\"\n # Mean and sum for a 1-d float column\n c = table.Column(name='a', data=[1., 2., 3.])\n assert np.allclose(c.mean(), 2.0)\n assert isinstance(c.mean(), (np.floating, float))\n assert np.allclose(c.sum(), 6.)\n assert isinstance(c.sum(), (np.floating, float))\n\n # Non-reduction ufunc preserves Column class\n assert isinstance(np.cos(c), table.Column)\n\n # Sum for a 1-d int column\n c = table.Column(name='a', data=[1, 2, 3])\n assert np.allclose(c.sum(), 6)\n assert isinstance(c.sum(), (np.integer, int))\n\n # Sum for a 2-d int column\n c = table.Column(name='a', data=[[1, 2, 3],\n [4, 5, 6]])\n assert c.sum() == 21\n assert isinstance(c.sum(), (np.integer, int))\n assert np.all(c.sum(axis=0) == [5, 7, 9])\n assert c.sum(axis=0).shape == (3,)\n assert isinstance(c.sum(axis=0), np.ndarray)\n\n # Sum and mean for a 1-d masked column\n c = table.MaskedColumn(name='a', data=[1., 2., 3.], mask=[0, 0, 1])\n assert np.allclose(c.mean(), 1.5)\n assert isinstance(c.mean(), (np.floating, float))\n assert np.allclose(c.sum(), 3.)\n assert isinstance(c.sum(), (np.floating, float))\n\n def test_name_none(self, Column):\n \"\"\"Can create a column without supplying name, which defaults to None\"\"\"\n c = Column([1, 2])\n assert c.name is None\n assert np.all(c == np.array([1, 2]))\n\n def test_quantity_init(self, Column):\n\n c = 
Column(data=np.array([1, 2, 3]) * u.m)\n assert np.all(c.data == np.array([1, 2, 3]))\n assert np.all(c.unit == u.m)\n\n c = Column(data=np.array([1, 2, 3]) * u.m, unit=u.cm)\n assert np.all(c.data == np.array([100, 200, 300]))\n assert np.all(c.unit == u.cm)\n\n def test_quantity_comparison(self, Column):\n # regression test for gh-6532\n c = Column([1, 2100, 3], unit='Hz')\n q = 2 * u.kHz\n check = c < q\n assert np.all(check == [True, False, True])\n # This already worked, but just in case.\n check = q >= c\n assert np.all(check == [True, False, True])\n\n def test_attrs_survive_getitem_after_change(self, Column):\n \"\"\"\n Test for issue #3023: when calling getitem with a MaskedArray subclass\n the original object attributes are not copied.\n \"\"\"\n c1 = Column([1, 2, 3], name='a', unit='m', format='%i',\n description='aa', meta={'a': 1})\n c1.name = 'b'\n c1.unit = 'km'\n c1.format = '%d'\n c1.description = 'bb'\n c1.meta = {'bbb': 2}\n\n for item in (slice(None, None), slice(None, 1), np.array([0, 2]),\n np.array([False, True, False])):\n c2 = c1[item]\n assert c2.name == 'b'\n assert c2.unit is u.km\n assert c2.format == '%d'\n assert c2.description == 'bb'\n assert c2.meta == {'bbb': 2}\n\n # Make sure that calling getitem resulting in a scalar does\n # not copy attributes.\n val = c1[1]\n for attr in ('name', 'unit', 'format', 'description', 'meta'):\n assert not hasattr(val, attr)\n\n def test_to_quantity(self, Column):\n d = Column([1, 2, 3], name='a', dtype=\"f8\", unit=\"m\")\n\n assert np.all(d.quantity == ([1, 2, 3.] * u.m))\n assert np.all(d.quantity.value == ([1, 2, 3.] 
* u.m).value)\n assert np.all(d.quantity == d.to('m'))\n assert np.all(d.quantity.value == d.to('m').value)\n\n np.testing.assert_allclose(d.to(u.km).value, ([.001, .002, .003] * u.km).value)\n np.testing.assert_allclose(d.to('km').value, ([.001, .002, .003] * u.km).value)\n\n np.testing.assert_allclose(d.to(u.MHz, u.equivalencies.spectral()).value,\n [299.792458, 149.896229, 99.93081933])\n\n d_nounit = Column([1, 2, 3], name='a', dtype=\"f8\", unit=None)\n with pytest.raises(u.UnitsError):\n d_nounit.to(u.km)\n assert np.all(d_nounit.to(u.dimensionless_unscaled) == np.array([1, 2, 3]))\n\n # make sure the correct copy/no copy behavior is happening\n q = [1, 3, 5] * u.km\n\n # to should always make a copy\n d.to(u.km)[:] = q\n np.testing.assert_allclose(d, [1, 2, 3])\n\n # explicit copying of the quantity should not change the column\n d.quantity.copy()[:] = q\n np.testing.assert_allclose(d, [1, 2, 3])\n\n # but quantity directly is a \"view\", accessing the underlying column\n d.quantity[:] = q\n np.testing.assert_allclose(d, [1000, 3000, 5000])\n\n # view should also work for integers\n d2 = Column([1, 2, 3], name='a', dtype=int, unit=\"m\")\n d2.quantity[:] = q\n np.testing.assert_allclose(d2, [1000, 3000, 5000])\n\n # but it should fail for strings or other non-numeric tables\n d3 = Column(['arg', 'name', 'stuff'], name='a', unit=\"m\")\n with pytest.raises(TypeError):\n d3.quantity\n\n def test_to_funcunit_quantity(self, Column):\n \"\"\"\n Tests for #8424, check if function-unit can be retrieved from column.\n \"\"\"\n d = Column([1, 2, 3], name='a', dtype=\"f8\", unit=\"dex(AA)\")\n\n assert np.all(d.quantity == ([1, 2, 3] * u.dex(u.AA)))\n assert np.all(d.quantity.value == ([1, 2, 3] * u.dex(u.AA)).value)\n assert np.all(d.quantity == d.to(\"dex(AA)\"))\n assert np.all(d.quantity.value == d.to(\"dex(AA)\").value)\n\n # make sure, casting to linear unit works\n q = [10, 100, 1000] * u.AA\n np.testing.assert_allclose(d.to(u.AA), q)\n\n def 
test_item_access_type(self, Column):\n \"\"\"\n Tests for #3095, which forces integer item access to always return a plain\n ndarray or MaskedArray, even in the case of a multi-dim column.\n \"\"\"\n integer_types = (int, np.int_)\n\n for int_type in integer_types:\n c = Column([[1, 2], [3, 4]])\n i0 = int_type(0)\n i1 = int_type(1)\n assert np.all(c[i0] == [1, 2])\n assert type(c[i0]) == (np.ma.MaskedArray if hasattr(Column, 'mask') else np.ndarray)\n assert c[i0].shape == (2,)\n\n c01 = c[i0:i1]\n assert np.all(c01 == [[1, 2]])\n assert isinstance(c01, Column)\n assert c01.shape == (1, 2)\n\n c = Column([1, 2])\n assert np.all(c[i0] == 1)\n assert isinstance(c[i0], np.integer)\n assert c[i0].shape == ()\n\n c01 = c[i0:i1]\n assert np.all(c01 == [1])\n assert isinstance(c01, Column)\n assert c01.shape == (1,)\n\n def test_insert_basic(self, Column):\n c = Column([0, 1, 2], name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n\n # Basic insert\n c1 = c.insert(1, 100)\n assert np.all(c1 == [0, 100, 1, 2])\n assert c1.attrs_equal(c)\n assert type(c) is type(c1)\n if hasattr(c1, 'mask'):\n assert c1.data.shape == c1.mask.shape\n\n c1 = c.insert(-1, 100)\n assert np.all(c1 == [0, 1, 100, 2])\n\n c1 = c.insert(3, 100)\n assert np.all(c1 == [0, 1, 2, 100])\n\n c1 = c.insert(-3, 100)\n assert np.all(c1 == [100, 0, 1, 2])\n\n c1 = c.insert(1, [100, 200, 300])\n if hasattr(c1, 'mask'):\n assert c1.data.shape == c1.mask.shape\n\n # Out of bounds index\n with pytest.raises((ValueError, IndexError)):\n c1 = c.insert(-4, 100)\n with pytest.raises((ValueError, IndexError)):\n c1 = c.insert(4, 100)\n\n def test_insert_axis(self, Column):\n \"\"\"Insert with non-default axis kwarg\"\"\"\n c = Column([[1, 2], [3, 4]])\n\n c1 = c.insert(1, [5, 6], axis=None)\n assert np.all(c1 == [1, 5, 6, 2, 3, 4])\n\n c1 = c.insert(1, [5, 6], axis=1)\n assert np.all(c1 == [[1, 5, 2], [3, 6, 4]])\n\n def test_insert_string_expand(self, Column):\n c = 
Column(['a', 'b'])\n c1 = c.insert(0, 'abc')\n assert np.all(c1 == ['abc', 'a', 'b'])\n\n c = Column(['a', 'b'])\n c1 = c.insert(0, ['c', 'def'])\n assert np.all(c1 == ['c', 'def', 'a', 'b'])\n\n def test_insert_string_masked_values(self):\n c = table.MaskedColumn(['a', 'b'])\n c1 = c.insert(0, np.ma.masked)\n assert np.all(c1 == ['', 'a', 'b'])\n assert np.all(c1.mask == [True, False, False])\n assert c1.dtype == 'U1'\n c2 = c.insert(1, np.ma.MaskedArray(['ccc', 'dd'], mask=[True, False]))\n assert np.all(c2 == ['a', 'ccc', 'dd', 'b'])\n assert np.all(c2.mask == [False, True, False, False])\n assert c2.dtype == 'U3'\n\n def test_insert_string_type_error(self, Column):\n c = Column([1, 2])\n with pytest.raises(ValueError, match='invalid literal for int'):\n c.insert(0, 'string')\n\n c = Column(['a', 'b'])\n with pytest.raises(TypeError, match='string operation on non-string array'):\n c.insert(0, 1)\n\n def test_insert_multidim(self, Column):\n c = Column([[1, 2],\n [3, 4]], name='a', dtype=int)\n\n # Basic insert\n c1 = c.insert(1, [100, 200])\n assert np.all(c1 == [[1, 2], [100, 200], [3, 4]])\n\n # Broadcast\n c1 = c.insert(1, 100)\n assert np.all(c1 == [[1, 2], [100, 100], [3, 4]])\n\n # Wrong shape\n with pytest.raises(ValueError):\n c1 = c.insert(1, [100, 200, 300])\n\n def test_insert_object(self, Column):\n c = Column(['a', 1, None], name='a', dtype=object)\n\n # Basic insert\n c1 = c.insert(1, [100, 200])\n assert np.all(c1 == np.array(['a', [100, 200], 1, None],\n dtype=object))\n\n def test_insert_masked(self):\n c = table.MaskedColumn([0, 1, 2], name='a', fill_value=9999,\n mask=[False, True, False])\n\n # Basic insert\n c1 = c.insert(1, 100)\n assert np.all(c1.data.data == [0, 100, 1, 2])\n assert c1.fill_value == 9999\n assert np.all(c1.data.mask == [False, False, True, False])\n assert type(c) is type(c1)\n\n for mask in (False, True):\n c1 = c.insert(1, 100, mask=mask)\n assert np.all(c1.data.data == [0, 100, 1, 2])\n assert np.all(c1.data.mask == 
[False, mask, True, False])\n\n def test_masked_multidim_as_list(self):\n data = np.ma.MaskedArray([1, 2], mask=[True, False])\n c = table.MaskedColumn([data])\n assert c.shape == (1, 2)\n assert np.all(c[0].mask == [True, False])\n\n def test_insert_masked_multidim(self):\n c = table.MaskedColumn([[1, 2],\n [3, 4]], name='a', dtype=int)\n\n c1 = c.insert(1, [100, 200], mask=True)\n assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])\n assert np.all(c1.data.mask == [[False, False], [True, True], [False, False]])\n\n c1 = c.insert(1, [100, 200], mask=[True, False])\n assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])\n assert np.all(c1.data.mask == [[False, False], [True, False], [False, False]])\n\n with pytest.raises(ValueError):\n c1 = c.insert(1, [100, 200], mask=[True, False, True])\n\n def test_mask_on_non_masked_table(self):\n \"\"\"\n When table is not masked and trying to set mask on column then\n it's Raise AttributeError.\n \"\"\"\n\n t = table.Table([[1, 2], [3, 4]], names=('a', 'b'), dtype=('i4', 'f8'))\n\n with pytest.raises(AttributeError):\n t['a'].mask = [True, False]\n\n\nclass TestAttrEqual():\n \"\"\"Bunch of tests originally from ATpy that test the attrs_equal method.\"\"\"\n\n def test_5(self, Column):\n c1 = Column(name='a', dtype=int, unit='mJy')\n c2 = Column(name='a', dtype=int, unit='mJy')\n assert c1.attrs_equal(c2)\n\n def test_6(self, Column):\n c1 = Column(name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n c2 = Column(name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n assert c1.attrs_equal(c2)\n\n def test_7(self, Column):\n c1 = Column(name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n c2 = Column(name='b', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n assert not c1.attrs_equal(c2)\n\n def test_8(self, Column):\n c1 = 
Column(name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n c2 = Column(name='a', dtype=float, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n assert not c1.attrs_equal(c2)\n\n def test_9(self, Column):\n c1 = Column(name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n c2 = Column(name='a', dtype=int, unit='erg.cm-2.s-1.Hz-1', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n assert not c1.attrs_equal(c2)\n\n def test_10(self, Column):\n c1 = Column(name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n c2 = Column(name='a', dtype=int, unit='mJy', format='%g',\n description='test column', meta={'c': 8, 'd': 12})\n assert not c1.attrs_equal(c2)\n\n def test_11(self, Column):\n c1 = Column(name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n c2 = Column(name='a', dtype=int, unit='mJy', format='%i',\n description='another test column', meta={'c': 8, 'd': 12})\n assert not c1.attrs_equal(c2)\n\n def test_12(self, Column):\n c1 = Column(name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n c2 = Column(name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'e': 8, 'd': 12})\n assert not c1.attrs_equal(c2)\n\n def test_13(self, Column):\n c1 = Column(name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n c2 = Column(name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 9, 'd': 12})\n assert not c1.attrs_equal(c2)\n\n def test_col_and_masked_col(self):\n c1 = table.Column(name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 12})\n c2 = table.MaskedColumn(name='a', dtype=int, unit='mJy', format='%i',\n description='test column', meta={'c': 8, 'd': 
12})\n assert c1.attrs_equal(c2)\n assert c2.attrs_equal(c1)\n\n# Check that the meta descriptor is working as expected. The MetaBaseTest class\n# takes care of defining all the tests, and we simply have to define the class\n# and any minimal set of args to pass.\n\n\nclass TestMetaColumn(MetaBaseTest):\n test_class = table.Column\n args = ()\n\n\nclass TestMetaMaskedColumn(MetaBaseTest):\n test_class = table.MaskedColumn\n args = ()\n\n\ndef test_getitem_metadata_regression():\n \"\"\"\n Regression test for #1471: MaskedArray does not call __array_finalize__ so\n the meta-data was not getting copied over. By overloading _update_from we\n are able to work around this bug.\n \"\"\"\n\n # Make sure that meta-data gets propagated with __getitem__\n\n c = table.Column(data=[1, 2], name='a', description='b', unit='m', format=\"%i\", meta={'c': 8})\n assert c[1:2].name == 'a'\n assert c[1:2].description == 'b'\n assert c[1:2].unit == 'm'\n assert c[1:2].format == '%i'\n assert c[1:2].meta['c'] == 8\n\n c = table.MaskedColumn(data=[1, 2], name='a', description='b',\n unit='m', format=\"%i\", meta={'c': 8})\n assert c[1:2].name == 'a'\n assert c[1:2].description == 'b'\n assert c[1:2].unit == 'm'\n assert c[1:2].format == '%i'\n assert c[1:2].meta['c'] == 8\n\n # As above, but with take() - check the method and the function\n\n c = table.Column(data=[1, 2, 3], name='a', description='b',\n unit='m', format=\"%i\", meta={'c': 8})\n for subset in [c.take([0, 1]), np.take(c, [0, 1])]:\n assert subset.name == 'a'\n assert subset.description == 'b'\n assert subset.unit == 'm'\n assert subset.format == '%i'\n assert subset.meta['c'] == 8\n\n # Metadata isn't copied for scalar values\n for subset in [c.take(0), np.take(c, 0)]:\n assert subset == 1\n assert subset.shape == ()\n assert not isinstance(subset, table.Column)\n\n c = table.MaskedColumn(data=[1, 2, 3], name='a', description='b',\n unit='m', format=\"%i\", meta={'c': 8})\n for subset in [c.take([0, 1]), np.take(c, [0, 
1])]:
        assert subset.name == 'a'
        assert subset.description == 'b'
        assert subset.unit == 'm'
        assert subset.format == '%i'
        assert subset.meta['c'] == 8

    # Metadata isn't copied for scalar values
    for subset in [c.take(0), np.take(c, 0)]:
        assert subset == 1
        assert subset.shape == ()
        assert not isinstance(subset, table.MaskedColumn)


def test_unicode_guidelines():
    # Column should satisfy the project-wide unicode guidelines
    # (checked by the shared assert_follows_unicode_guidelines helper).
    arr = np.array([1, 2, 3])
    c = table.Column(arr, name='a')

    assert_follows_unicode_guidelines(c)


def test_scalar_column():
    """
    Column is not designed to hold scalars, but for numpy 1.6 this can happen:

    >> type(np.std(table.Column([1, 2])))
    astropy.table.column.Column
    """
    c = table.Column(1.5)
    assert repr(c) == '1.5'
    assert str(c) == '1.5'


def test_qtable_column_conversion():
    """
    Ensures that a QTable that gets assigned a unit switches to be Quantity-y
    """
    qtab = table.QTable([[1, 2], [3, 4.2]], names=['i', 'f'])

    # Without units both columns start out as plain Column.
    assert isinstance(qtab['i'], table.column.Column)
    assert isinstance(qtab['f'], table.column.Column)

    # Assigning a unit converts that column (and only that column) to Quantity.
    qtab['i'].unit = 'km/s'
    assert isinstance(qtab['i'], u.Quantity)
    assert isinstance(qtab['f'], table.column.Column)

    # should follow from the above, but good to make sure as a #4497 regression test
    assert isinstance(qtab['i'][0], u.Quantity)
    assert isinstance(qtab[0]['i'], u.Quantity)
    assert not isinstance(qtab['f'][0], u.Quantity)
    assert not isinstance(qtab[0]['f'], u.Quantity)

    # Regression test for #5342: if a function unit is assigned, the column
    # should become the appropriate FunctionQuantity subclass.
    qtab['f'].unit = u.dex(u.cm / u.s**2)
    assert isinstance(qtab['f'], u.Dex)


@pytest.mark.parametrize('masked', [True, False])
def test_string_truncation_warning(masked):
    """
    Test warnings associated with in-place assignment to a string
    column that results in truncation of the right hand side.
    """
    from inspect import currentframe, getframeinfo

    t = table.Table([['aa',
'bb']], names=['a'], masked=masked)
    # Assignments that fit within the 2-character dtype emit no warning.
    t['a'][1] = 'cc'
    t['a'][:] = 'dd'

    with pytest.warns(table.StringTruncateWarning, match=r'truncated right side '
                      r'string\(s\) longer than 2 character\(s\)') as w:
        frameinfo = getframeinfo(currentframe())
        t['a'][0] = 'eee'  # replace item with string that gets truncated
    assert t['a'][0] == 'ee'
    assert len(w) == 1

    # Make sure the warning points back to the user code line
    assert w[0].lineno == frameinfo.lineno + 1
    assert 'test_column' in w[0].filename

    # Slice assignment that truncates any element also warns (once).
    with pytest.warns(table.StringTruncateWarning, match=r'truncated right side '
                      r'string\(s\) longer than 2 character\(s\)') as w:
        t['a'][:] = ['ff', 'ggg']  # replace item with string that gets truncated
    assert np.all(t['a'] == ['ff', 'gg'])
    assert len(w) == 1

    # Test the obscure case of assigning from an array that was originally
    # wider than any of the current elements (i.e. dtype is U4 but actual
    # elements are U1 at the time of assignment).
    val = np.array(['ffff', 'gggg'])
    val[:] = ['f', 'g']
    t['a'][:] = val
    assert np.all(t['a'] == ['f', 'g'])


def test_string_truncation_warning_masked():
    """
    Test warnings associated with in-place assignment to a string
    to a masked column, specifically where the right hand side
    contains np.ma.masked.
    """

    # Test for strings, but also cover assignment of np.ma.masked to
    # int and float masked column setting.
This was previously only
    # covered in an unrelated io.ascii test (test_line_endings) which
    # showed an unexpected difference between handling of str and numeric
    # masked arrays.
    for values in (['a', 'b'], [1, 2], [1.0, 2.0]):
        mc = table.MaskedColumn(values)

        # Item assignment of np.ma.masked sets only that element's mask.
        mc[1] = np.ma.masked
        assert np.all(mc.mask == [False, True])

        # Slice assignment of np.ma.masked masks everything.
        mc[:] = np.ma.masked
        assert np.all(mc.mask == [True, True])

    mc = table.MaskedColumn(['aa', 'bb'])

    # A masked entry on the right hand side must not suppress the
    # truncation warning for the unmasked (too-long) entry.
    with pytest.warns(table.StringTruncateWarning, match=r'truncated right side '
                      r'string\(s\) longer than 2 character\(s\)') as w:
        mc[:] = [np.ma.masked, 'ggg']  # replace item with string that gets truncated
    assert mc[1] == 'gg'
    assert np.all(mc.mask == [True, False])
    assert len(w) == 1


@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_create_from_str(Column):
    """
    Create a bytestring Column from strings (including unicode) in Py3.
    """
    # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
    # Stress the system by injecting non-ASCII characters.
    uba = 'bä'
    c = Column([uba, 'def'], dtype='S')
    assert c.dtype.char == 'S'
    assert c[0] == uba
    assert isinstance(c[0], str)
    assert isinstance(c[:0], table.Column)
    assert np.all(c[:2] == np.array([uba, 'def']))


@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_bytes_obj(Column):
    """
    Create a Column of dtype object with bytestring in it and make sure
    it keeps the bytestring and not convert to str with accessed.
    """
    c = Column([None, b'def'])
    assert c.dtype.char == 'O'
    assert not c[0]
    assert c[1] == b'def'
    assert isinstance(c[1], bytes)
    assert not isinstance(c[1], str)
    assert isinstance(c[:0], table.Column)
    assert np.all(c[:2] == np.array([None, b'def']))
    assert not np.all(c[:2] == np.array([None, 'def']))


@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def 
test_col_unicode_sandwich_bytes(Column):
    """
    Create a bytestring Column from bytes and ensure that it works in Python 3 in
    a convenient way like in Python 2.
    """
    # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
    # Stress the system by injecting non-ASCII characters.
    uba = 'bä'
    uba8 = uba.encode('utf-8')
    c = Column([uba8, b'def'])
    assert c.dtype.char == 'S'
    # Item access decodes bytes to str ("unicode sandwich" behavior).
    assert c[0] == uba
    assert isinstance(c[0], str)
    assert isinstance(c[:0], table.Column)
    assert np.all(c[:2] == np.array([uba, 'def']))

    assert isinstance(c[:], table.Column)
    assert c[:].dtype.char == 'S'

    # Array / list comparisons
    assert np.all(c == [uba, 'def'])

    ok = c == [uba8, b'def']
    assert type(ok) is type(c.data)  # noqa
    assert ok.dtype.char == '?'
    assert np.all(ok)

    assert np.all(c == np.array([uba, 'def']))
    assert np.all(c == np.array([uba8, b'def']))

    # Scalar compare
    cmps = (uba, uba8)
    for cmp in cmps:
        ok = c == cmp
        assert type(ok) is type(c.data)  # noqa
        assert np.all(ok == [True, False])


def test_col_unicode_sandwich_unicode():
    """
    Sanity check that Unicode Column behaves normally.
    """
    uba = 'bä'
    uba8 = uba.encode('utf-8')

    c = table.Column([uba, 'def'], dtype='U')
    assert c[0] == uba
    assert isinstance(c[:0], table.Column)
    assert isinstance(c[0], str)
    assert np.all(c[:2] == np.array([uba, 'def']))

    assert isinstance(c[:], table.Column)
    assert c[:].dtype.char == 'U'

    ok = c == [uba, 'def']
    assert type(ok) == np.ndarray
    assert ok.dtype.char == '?'
    assert np.all(ok)

    # Comparing a unicode column against bytes is elementwise-unequal.
    with warnings.catch_warnings():
        # Ignore the FutureWarning in numpy >=1.24 (it is OK).
        warnings.filterwarnings('ignore', message='.*elementwise comparison failed.*')
        assert np.all(c != [uba8, b'def'])


def test_masked_col_unicode_sandwich():
    """
    Create a bytestring MaskedColumn and ensure that it works in Python 3 in
    a convenient way like in Python 2.
    """
    c = 
table.MaskedColumn([b'abc', b'def'])
    c[1] = np.ma.masked
    assert isinstance(c[:0], table.MaskedColumn)
    # Unmasked item access decodes bytes to str.
    assert isinstance(c[0], str)

    assert c[0] == 'abc'
    assert c[1] is np.ma.masked

    assert isinstance(c[:], table.MaskedColumn)
    assert c[:].dtype.char == 'S'

    # Comparison propagates the mask into the result.
    ok = c == ['abc', 'def']
    assert ok[0] == True  # noqa
    assert ok[1] is np.ma.masked
    assert np.all(c == [b'abc', b'def'])
    assert np.all(c == np.array(['abc', 'def']))
    assert np.all(c == np.array([b'abc', b'def']))

    # Scalar comparison with both str and bytes gives a masked array.
    for cmp in ('abc', b'abc'):
        ok = c == cmp
        assert type(ok) is np.ma.MaskedArray
        assert ok[0] == True  # noqa
        assert ok[1] is np.ma.masked


@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_unicode_sandwich_set(Column):
    """
    Test setting
    """
    uba = 'bä'

    c = Column([b'abc', b'def'])

    # Item assignment with bytes.
    c[0] = b'aa'
    assert np.all(c == ['aa', 'def'])

    c[0] = uba  # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding
    assert np.all(c == [uba, 'def'])
    # NOTE(review): expected pformat padding reconstructed from 4-character
    # right-justification of 2- and 3-character values — confirm spacing.
    assert c.pformat() == ['None', '----', '  ' + uba, ' def']

    # Slice assignment with bytes, then str.
    c[:] = b'cc'
    assert np.all(c == ['cc', 'cc'])

    c[:] = uba
    assert np.all(c == [uba, uba])

    # Mixed str/bytes slice assignment.
    c[:] = ''
    c[:] = [uba, b'def']
    assert np.all(c == [uba, b'def'])


@pytest.mark.parametrize('class1', [table.MaskedColumn, table.Column])
@pytest.mark.parametrize('class2', [table.MaskedColumn, table.Column, str, list])
def test_unicode_sandwich_compare(class1, class2):
    """Test that comparing a bytestring Column/MaskedColumn with various
    str (unicode) object types gives the expected result.
Tests #6838.\n \"\"\"\n obj1 = class1([b'a', b'c'])\n if class2 is str:\n obj2 = 'a'\n elif class2 is list:\n obj2 = ['a', 'b']\n else:\n obj2 = class2(['a', 'b'])\n\n assert np.all((obj1 == obj2) == [True, False])\n assert np.all((obj2 == obj1) == [True, False])\n\n assert np.all((obj1 != obj2) == [False, True])\n assert np.all((obj2 != obj1) == [False, True])\n\n assert np.all((obj1 > obj2) == [False, True])\n assert np.all((obj2 > obj1) == [False, False])\n\n assert np.all((obj1 <= obj2) == [True, False])\n assert np.all((obj2 <= obj1) == [True, True])\n\n assert np.all((obj1 < obj2) == [False, False])\n assert np.all((obj2 < obj1) == [False, True])\n\n assert np.all((obj1 >= obj2) == [True, True])\n assert np.all((obj2 >= obj1) == [True, False])\n\n\ndef test_unicode_sandwich_masked_compare():\n \"\"\"Test the fix for #6839 from #6899.\"\"\"\n c1 = table.MaskedColumn(['a', 'b', 'c', 'd'],\n mask=[True, False, True, False])\n c2 = table.MaskedColumn([b'a', b'b', b'c', b'd'],\n mask=[True, True, False, False])\n\n for cmp in ((c1 == c2), (c2 == c1)):\n assert cmp[0] is np.ma.masked\n assert cmp[1] is np.ma.masked\n assert cmp[2] is np.ma.masked\n assert cmp[3]\n\n for cmp in ((c1 != c2), (c2 != c1)):\n assert cmp[0] is np.ma.masked\n assert cmp[1] is np.ma.masked\n assert cmp[2] is np.ma.masked\n assert not cmp[3]\n\n # Note: comparisons <, >, >=, <= fail to return a masked array entirely,\n # see https://github.com/numpy/numpy/issues/10092.\n\n\ndef test_structured_masked_column_roundtrip():\n mc = table.MaskedColumn([(1., 2.), (3., 4.)],\n mask=[(False, False), (False, False)], dtype='f8,f8')\n assert len(mc.dtype.fields) == 2\n mc2 = table.MaskedColumn(mc)\n assert_array_equal(mc2, mc)\n\n\n@pytest.mark.parametrize('dtype', ['i4,f4', 'f4,(2,)f8'])\ndef test_structured_empty_column_init(dtype):\n dtype = np.dtype(dtype)\n c = table.Column(length=5, shape=(2,), dtype=dtype)\n assert c.shape == (5, 2)\n assert c.dtype == dtype\n\n\ndef 
test_column_value_access():\n \"\"\"Can a column's underlying data consistently be accessed via `.value`,\n whether it is a `Column`, `MaskedColumn`, `Quantity`, or `Time`?\"\"\"\n data = np.array([1, 2, 3])\n tbl = table.QTable({'a': table.Column(data),\n 'b': table.MaskedColumn(data),\n 'c': u.Quantity(data),\n 'd': time.Time(data, format='mjd')})\n assert type(tbl['a'].value) == np.ndarray\n assert type(tbl['b'].value) == np.ma.MaskedArray\n assert type(tbl['c'].value) == np.ndarray\n assert type(tbl['d'].value) == np.ndarray\n\n\ndef test_masked_column_serialize_method_propagation():\n mc = table.MaskedColumn([1., 2., 3.], mask=[True, False, True])\n assert mc.info.serialize_method['ecsv'] == 'null_value'\n mc.info.serialize_method['ecsv'] = 'data_mask'\n assert mc.info.serialize_method['ecsv'] == 'data_mask'\n mc2 = mc.copy()\n assert mc2.info.serialize_method['ecsv'] == 'data_mask'\n mc3 = table.MaskedColumn(mc)\n assert mc3.info.serialize_method['ecsv'] == 'data_mask'\n mc4 = mc.view(table.MaskedColumn)\n assert mc4.info.serialize_method['ecsv'] == 'data_mask'\n mc5 = mc[1:]\n assert mc5.info.serialize_method['ecsv'] == 'data_mask'\n\n\n@pytest.mark.parametrize('dtype', ['S', 'U', 'i'])\ndef test_searchsorted(Column, dtype):\n c = Column([1, 2, 2, 3], dtype=dtype)\n if isinstance(Column, table.MaskedColumn):\n # Searchsorted seems to ignore the mask\n c[2] = np.ma.masked\n\n if dtype == 'i':\n vs = (2, [2, 1])\n else:\n vs = ('2', ['2', '1'], b'2', [b'2', b'1'])\n for v in vs:\n v = np.array(v, dtype=dtype)\n exp = np.searchsorted(c.data, v, side='right')\n res = c.searchsorted(v, side='right')\n assert np.all(res == exp)\n res = np.searchsorted(c, v, side='right')\n assert np.all(res == exp)\n"}}},{"rowIdx":1354,"cells":{"hash":{"kind":"string","value":"c7719415693adf869cf20249c8a04b5c428e803f6cfdf7ce3fb26bdc76258c12"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\nimport copy\nimport pickle\nfrom io 
import StringIO\n\nimport pytest\nimport numpy as np\n\nfrom astropy.table.serialize import represent_mixins_as_columns\nfrom astropy.utils.data_info import ParentDtypeInfo\nfrom astropy.table.table_helpers import ArrayWrapper\nfrom astropy.coordinates import EarthLocation, SkyCoord\nfrom astropy.table import Table, QTable, join, hstack, vstack, Column, NdarrayMixin\nfrom astropy.table import serialize\nfrom astropy import time\nfrom astropy import coordinates\nfrom astropy import units as u\nfrom astropy.table.column import BaseColumn\nfrom astropy.table import table_helpers\nfrom astropy.utils.exceptions import AstropyUserWarning\nfrom astropy.utils.metadata import MergeConflictWarning\nfrom astropy.coordinates.tests.test_representation import representation_equal\nfrom astropy.coordinates.tests.helper import skycoord_equal\n\nfrom .conftest import MIXIN_COLS\n\n\ndef test_attributes(mixin_cols):\n \"\"\"\n Required attributes for a column can be set.\n \"\"\"\n m = mixin_cols['m']\n m.info.name = 'a'\n assert m.info.name == 'a'\n\n m.info.description = 'a'\n assert m.info.description == 'a'\n\n # Cannot set unit for these classes\n if isinstance(m, (u.Quantity, coordinates.SkyCoord, time.Time, time.TimeDelta,\n coordinates.BaseRepresentationOrDifferential)):\n with pytest.raises(AttributeError):\n m.info.unit = u.m\n else:\n m.info.unit = u.m\n assert m.info.unit is u.m\n\n m.info.format = 'a'\n assert m.info.format == 'a'\n\n m.info.meta = {'a': 1}\n assert m.info.meta == {'a': 1}\n\n with pytest.raises(AttributeError):\n m.info.bad_attr = 1\n\n with pytest.raises(AttributeError):\n m.info.bad_attr\n\n\ndef check_mixin_type(table, table_col, in_col):\n # We check for QuantityInfo rather than just isinstance(col, u.Quantity)\n # since we want to treat EarthLocation as a mixin, even though it is\n # a Quantity subclass.\n if ((isinstance(in_col.info, u.QuantityInfo) and type(table) is not QTable)\n or isinstance(in_col, Column)):\n assert type(table_col) is 
table.ColumnClass\n else:\n assert type(table_col) is type(in_col)\n\n # Make sure in_col got copied and creating table did not touch it\n assert in_col.info.name is None\n\n\ndef test_make_table(table_types, mixin_cols):\n \"\"\"\n Make a table with the columns in mixin_cols, which is an ordered dict of\n three cols: 'a' and 'b' are table_types.Column type, and 'm' is a mixin.\n \"\"\"\n t = table_types.Table(mixin_cols)\n check_mixin_type(t, t['m'], mixin_cols['m'])\n\n cols = list(mixin_cols.values())\n t = table_types.Table(cols, names=('i', 'a', 'b', 'm'))\n check_mixin_type(t, t['m'], mixin_cols['m'])\n\n t = table_types.Table(cols)\n check_mixin_type(t, t['col3'], mixin_cols['m'])\n\n\ndef test_io_ascii_write():\n \"\"\"\n Test that table with mixin column can be written by io.ascii for\n every pure Python writer. No validation of the output is done,\n this just confirms no exceptions.\n \"\"\"\n from astropy.io.ascii.connect import _get_connectors_table\n t = QTable(MIXIN_COLS)\n for fmt in _get_connectors_table():\n if fmt['Write'] and '.fast_' not in fmt['Format']:\n out = StringIO()\n t.write(out, format=fmt['Format'])\n\n\ndef test_votable_quantity_write(tmpdir):\n \"\"\"\n Test that table with Quantity mixin column can be round-tripped by\n io.votable. 
Note that FITS and HDF5 mixin support are tested (much more\n thoroughly) in their respective subpackage tests\n (io/fits/tests/test_connect.py and io/misc/tests/test_hdf5.py).\n \"\"\"\n t = QTable()\n t['a'] = u.Quantity([1, 2, 4], unit='nm')\n\n filename = str(tmpdir.join('table-tmp'))\n t.write(filename, format='votable', overwrite=True)\n qt = QTable.read(filename, format='votable')\n assert isinstance(qt['a'], u.Quantity)\n assert qt['a'].unit == 'nm'\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize('table_types', (Table, QTable))\ndef test_io_time_write_fits_standard(tmpdir, table_types):\n \"\"\"\n Test that table with Time mixin columns can be written by io.fits.\n Validation of the output is done. Test that io.fits writes a table\n containing Time mixin columns that can be partially round-tripped\n (metadata scale, location).\n\n Note that we postpone checking the \"local\" scale, since that cannot\n be done with format 'cxcsec', as it requires an epoch.\n \"\"\"\n t = table_types([[1, 2], ['string', 'column']])\n for scale in time.STANDARD_TIME_SCALES:\n t['a' + scale] = time.Time([[1, 2], [3, 4]], format='cxcsec',\n scale=scale, location=EarthLocation(\n -2446354, 4237210, 4077985, unit='m'))\n t['b' + scale] = time.Time(['1999-01-01T00:00:00.123456789',\n '2010-01-01T00:00:00'], scale=scale)\n t['c'] = [3., 4.]\n\n filename = str(tmpdir.join('table-tmp'))\n\n # Show that FITS format succeeds\n with pytest.warns(\n AstropyUserWarning,\n match='Time Column \"btai\" has no specified location, '\n 'but global Time Position is present'):\n t.write(filename, format='fits', overwrite=True)\n with pytest.warns(\n AstropyUserWarning,\n match='Time column reference position \"TRPOSn\" is not specified'):\n tm = table_types.read(filename, format='fits', astropy_native=True)\n\n for scale in time.STANDARD_TIME_SCALES:\n for ab in ('a', 'b'):\n name = ab + scale\n\n # Assert that the time columns are read as Time\n assert isinstance(tm[name], time.Time)\n\n 
# Assert that the scales round-trip\n assert tm[name].scale == t[name].scale\n\n # Assert that the format is jd\n assert tm[name].format == 'jd'\n\n # Assert that the location round-trips\n assert tm[name].location == t[name].location\n\n # Finally assert that the column data round-trips\n assert (tm[name] == t[name]).all()\n\n for name in ('col0', 'col1', 'c'):\n # Assert that the non-time columns are read as Column\n assert isinstance(tm[name], Column)\n\n # Assert that the non-time columns' data round-trips\n assert (tm[name] == t[name]).all()\n\n # Test for conversion of time data to its value, as defined by its format\n for scale in time.STANDARD_TIME_SCALES:\n for ab in ('a', 'b'):\n name = ab + scale\n t[name].info.serialize_method['fits'] = 'formatted_value'\n\n t.write(filename, format='fits', overwrite=True)\n tm = table_types.read(filename, format='fits')\n\n for scale in time.STANDARD_TIME_SCALES:\n for ab in ('a', 'b'):\n name = ab + scale\n\n assert not isinstance(tm[name], time.Time)\n assert (tm[name] == t[name].value).all()\n\n\n@pytest.mark.parametrize('table_types', (Table, QTable))\ndef test_io_time_write_fits_local(tmpdir, table_types):\n \"\"\"\n Test that table with a Time mixin with scale local can also be written\n by io.fits. 
Like ``test_io_time_write_fits_standard`` above, but avoiding\n ``cxcsec`` format, which requires an epoch and thus cannot be used for a\n local time scale.\n \"\"\"\n t = table_types([[1, 2], ['string', 'column']])\n t['a_local'] = time.Time([[50001, 50002], [50003, 50004]],\n format='mjd', scale='local',\n location=EarthLocation(-2446354, 4237210, 4077985,\n unit='m'))\n t['b_local'] = time.Time(['1999-01-01T00:00:00.123456789',\n '2010-01-01T00:00:00'], scale='local')\n t['c'] = [3., 4.]\n\n filename = str(tmpdir.join('table-tmp'))\n\n # Show that FITS format succeeds\n\n with pytest.warns(AstropyUserWarning,\n match='Time Column \"b_local\" has no specified location'):\n t.write(filename, format='fits', overwrite=True)\n\n with pytest.warns(AstropyUserWarning,\n match='Time column reference position \"TRPOSn\" is not specified.'):\n tm = table_types.read(filename, format='fits', astropy_native=True)\n\n for ab in ('a', 'b'):\n name = ab + '_local'\n\n # Assert that the time columns are read as Time\n assert isinstance(tm[name], time.Time)\n\n # Assert that the scales round-trip\n assert tm[name].scale == t[name].scale\n\n # Assert that the format is jd\n assert tm[name].format == 'jd'\n\n # Assert that the location round-trips\n assert tm[name].location == t[name].location\n\n # Finally assert that the column data round-trips\n assert (tm[name] == t[name]).all()\n\n for name in ('col0', 'col1', 'c'):\n # Assert that the non-time columns are read as Column\n assert isinstance(tm[name], Column)\n\n # Assert that the non-time columns' data round-trips\n assert (tm[name] == t[name]).all()\n\n # Test for conversion of time data to its value, as defined by its format.\n for ab in ('a', 'b'):\n name = ab + '_local'\n t[name].info.serialize_method['fits'] = 'formatted_value'\n\n t.write(filename, format='fits', overwrite=True)\n tm = table_types.read(filename, format='fits')\n\n for ab in ('a', 'b'):\n name = ab + '_local'\n\n assert not isinstance(tm[name], 
time.Time)\n assert (tm[name] == t[name].value).all()\n\n\ndef test_votable_mixin_write_fail(mixin_cols):\n \"\"\"\n Test that table with mixin columns (excluding Quantity) cannot be written by\n io.votable.\n \"\"\"\n t = QTable(mixin_cols)\n # Only do this test if there are unsupported column types (i.e. anything besides\n # BaseColumn and Quantity class instances).\n unsupported_cols = t.columns.not_isinstance((BaseColumn, u.Quantity))\n\n if not unsupported_cols:\n pytest.skip(\"no unsupported column types\")\n\n out = StringIO()\n with pytest.raises(ValueError) as err:\n t.write(out, format='votable')\n assert 'cannot write table with mixin column(s)' in str(err.value)\n\n\ndef test_join(table_types):\n \"\"\"\n Join tables with mixin cols. Use column \"i\" as proxy for what the\n result should be for each mixin.\n \"\"\"\n t1 = table_types.Table()\n t1['a'] = table_types.Column(['a', 'b', 'b', 'c'])\n t1['i'] = table_types.Column([0, 1, 2, 3])\n for name, col in MIXIN_COLS.items():\n t1[name] = col\n\n t2 = table_types.Table(t1)\n t2['a'] = ['b', 'c', 'a', 'd']\n\n for name, col in MIXIN_COLS.items():\n t1[name].info.description = name\n t2[name].info.description = name + '2'\n\n for join_type in ('inner', 'left'):\n t12 = join(t1, t2, keys='a', join_type=join_type)\n idx1 = t12['i_1']\n idx2 = t12['i_2']\n for name, col in MIXIN_COLS.items():\n name1 = name + '_1'\n name2 = name + '_2'\n assert_table_name_col_equal(t12, name1, col[idx1])\n assert_table_name_col_equal(t12, name2, col[idx2])\n assert t12[name1].info.description == name\n assert t12[name2].info.description == name + '2'\n\n for join_type in ('outer', 'right'):\n with pytest.raises(NotImplementedError) as exc:\n t12 = join(t1, t2, keys='a', join_type=join_type)\n assert 'join requires masking column' in str(exc.value)\n\n with pytest.raises(TypeError) as exc:\n t12 = join(t1, t2, keys=['a', 'skycoord'])\n assert 'one or more key columns are not sortable' in str(exc.value)\n\n # Join does work 
for a mixin which is a subclass of np.ndarray\n with pytest.warns(MergeConflictWarning,\n match=\"In merged column 'quantity' the 'description' \"\n \"attribute does not match\"):\n t12 = join(t1, t2, keys=['quantity'])\n assert np.all(t12['a_1'] == t1['a'])\n\n\ndef test_hstack(table_types):\n \"\"\"\n Hstack tables with mixin cols. Use column \"i\" as proxy for what the\n result should be for each mixin.\n \"\"\"\n t1 = table_types.Table()\n t1['i'] = table_types.Column([0, 1, 2, 3])\n for name, col in MIXIN_COLS.items():\n t1[name] = col\n t1[name].info.description = name\n t1[name].info.meta = {'a': 1}\n\n for join_type in ('inner', 'outer'):\n for chop in (True, False):\n t2 = table_types.Table(t1)\n if chop:\n t2 = t2[:-1]\n if join_type == 'outer':\n with pytest.raises(NotImplementedError) as exc:\n t12 = hstack([t1, t2], join_type=join_type)\n assert 'hstack requires masking column' in str(exc.value)\n continue\n\n t12 = hstack([t1, t2], join_type=join_type)\n idx1 = t12['i_1']\n idx2 = t12['i_2']\n for name, col in MIXIN_COLS.items():\n name1 = name + '_1'\n name2 = name + '_2'\n assert_table_name_col_equal(t12, name1, col[idx1])\n assert_table_name_col_equal(t12, name2, col[idx2])\n for attr in ('description', 'meta'):\n assert getattr(t1[name].info, attr) == getattr(t12[name1].info, attr)\n assert getattr(t2[name].info, attr) == getattr(t12[name2].info, attr)\n\n\ndef assert_table_name_col_equal(t, name, col):\n \"\"\"\n Assert all(t[name] == col), with special handling for known mixin cols.\n \"\"\"\n if isinstance(col, coordinates.SkyCoord):\n assert np.all(t[name].ra == col.ra)\n assert np.all(t[name].dec == col.dec)\n elif isinstance(col, coordinates.BaseRepresentationOrDifferential):\n assert np.all(representation_equal(t[name], col))\n elif isinstance(col, u.Quantity):\n if type(t) is QTable:\n assert np.all(t[name] == col)\n elif isinstance(col, table_helpers.ArrayWrapper):\n assert np.all(t[name].data == col.data)\n else:\n assert np.all(t[name] 
== col)\n\n\ndef test_get_items(mixin_cols):\n \"\"\"\n Test that slicing / indexing table gives right values and col attrs inherit\n \"\"\"\n attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')\n m = mixin_cols['m']\n m.info.name = 'm'\n m.info.format = '{0}'\n m.info.description = 'd'\n m.info.meta = {'a': 1}\n t = QTable([m])\n for item in ([1, 3], np.array([0, 2]), slice(1, 3)):\n t2 = t[item]\n m2 = m[item]\n assert_table_name_col_equal(t2, 'm', m[item])\n for attr in attrs:\n assert getattr(t2['m'].info, attr) == getattr(m.info, attr)\n assert getattr(m2.info, attr) == getattr(m.info, attr)\n\n\ndef test_info_preserved_pickle_copy_init(mixin_cols):\n \"\"\"\n Test copy, pickle, and init from class roundtrip preserve info. This\n tests not only the mixin classes but a regular column as well.\n \"\"\"\n def pickle_roundtrip(c):\n return pickle.loads(pickle.dumps(c))\n\n def init_from_class(c):\n return c.__class__(c)\n\n attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')\n for colname in ('i', 'm'):\n m = mixin_cols[colname]\n m.info.name = colname\n m.info.format = '{0}'\n m.info.description = 'd'\n m.info.meta = {'a': 1}\n for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class):\n m2 = func(m)\n for attr in attrs:\n # non-native byteorder not preserved by last 2 func, _except_ for structured dtype\n if (attr != 'dtype'\n or getattr(m.info.dtype, 'isnative', True)\n or m.info.dtype.name.startswith('void')\n or func in (copy.copy, copy.deepcopy)):\n original = getattr(m.info, attr)\n else:\n # func does not preserve byteorder, check against (native) type.\n original = m.info.dtype.newbyteorder('=')\n assert getattr(m2.info, attr) == original\n\n\ndef test_add_column(mixin_cols):\n \"\"\"\n Test that adding a column preserves values and attributes\n \"\"\"\n attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')\n m = mixin_cols['m']\n assert m.info.name is None\n\n # Make sure adding column in various 
ways doesn't touch\n t = QTable([m], names=['a'])\n assert m.info.name is None\n\n t['new'] = m\n assert m.info.name is None\n\n m.info.name = 'm'\n m.info.format = '{0}'\n m.info.description = 'd'\n m.info.meta = {'a': 1}\n t = QTable([m])\n\n # Add columns m2, m3, m4 by two different methods and test expected equality\n t['m2'] = m\n m.info.name = 'm3'\n t.add_columns([m], copy=True)\n m.info.name = 'm4'\n t.add_columns([m], copy=False)\n for name in ('m2', 'm3', 'm4'):\n assert_table_name_col_equal(t, name, m)\n for attr in attrs:\n if attr != 'name':\n assert getattr(t['m'].info, attr) == getattr(t[name].info, attr)\n # Also check that one can set using a scalar.\n s = m[0]\n if type(s) is type(m) and 'info' in s.__dict__:\n # We're not going to worry about testing classes for which scalars\n # are a different class than the real array, or where info is not copied.\n t['s'] = m[0]\n assert_table_name_col_equal(t, 's', m[0])\n for attr in attrs:\n if attr != 'name':\n assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr)\n\n # While we're add it, also check a length-1 table.\n t = QTable([m[1:2]], names=['m'])\n if type(s) is type(m) and 'info' in s.__dict__:\n t['s'] = m[0]\n assert_table_name_col_equal(t, 's', m[0])\n for attr in attrs:\n if attr != 'name':\n assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr)\n\n\ndef test_vstack():\n \"\"\"\n Vstack tables with mixin cols.\n \"\"\"\n t1 = QTable(MIXIN_COLS)\n t2 = QTable(MIXIN_COLS)\n with pytest.raises(NotImplementedError):\n vstack([t1, t2])\n\n\ndef test_insert_row(mixin_cols):\n \"\"\"\n Test inserting a row, which works for Column, Quantity, Time and SkyCoord.\n \"\"\"\n t = QTable(mixin_cols)\n t0 = t.copy()\n t['m'].info.description = 'd'\n idxs = [0, -1, 1, 2, 3]\n if isinstance(t['m'], (u.Quantity, Column, time.Time, time.TimeDelta, coordinates.SkyCoord)):\n t.insert_row(1, t[-1])\n\n for name in t.colnames:\n col = t[name]\n if isinstance(col, coordinates.SkyCoord):\n assert 
skycoord_equal(col, t0[name][idxs])\n else:\n assert np.all(col == t0[name][idxs])\n\n assert t['m'].info.description == 'd'\n else:\n with pytest.raises(ValueError) as exc:\n t.insert_row(1, t[-1])\n assert \"Unable to insert row\" in str(exc.value)\n\n\ndef test_insert_row_bad_unit():\n \"\"\"\n Insert a row into a QTable with the wrong unit\n \"\"\"\n t = QTable([[1] * u.m])\n with pytest.raises(ValueError) as exc:\n t.insert_row(0, (2 * u.m / u.s,))\n assert \"'m / s' (speed/velocity) and 'm' (length) are not convertible\" in str(exc.value)\n\n\ndef test_convert_np_array(mixin_cols):\n \"\"\"\n Test that converting to numpy array creates an object dtype and that\n each instance in the array has the expected type.\n \"\"\"\n t = QTable(mixin_cols)\n ta = t.as_array()\n m = mixin_cols['m']\n dtype_kind = m.dtype.kind if hasattr(m, 'dtype') else 'O'\n assert ta['m'].dtype.kind == dtype_kind\n\n\ndef test_assignment_and_copy():\n \"\"\"\n Test that assignment of an int, slice, and fancy index works.\n Along the way test that copying table works.\n \"\"\"\n for name in ('quantity', 'arraywrap'):\n m = MIXIN_COLS[name]\n t0 = QTable([m], names=['m'])\n for i0, i1 in ((1, 2),\n (slice(0, 2), slice(1, 3)),\n (np.array([1, 2]), np.array([2, 3]))):\n t = t0.copy()\n t['m'][i0] = m[i1]\n if name == 'arraywrap':\n assert np.all(t['m'].data[i0] == m.data[i1])\n assert np.all(t0['m'].data[i0] == m.data[i0])\n assert np.all(t0['m'].data[i0] != t['m'].data[i0])\n else:\n assert np.all(t['m'][i0] == m[i1])\n assert np.all(t0['m'][i0] == m[i0])\n assert np.all(t0['m'][i0] != t['m'][i0])\n\n\ndef test_conversion_qtable_table():\n \"\"\"\n Test that a table round trips from QTable => Table => QTable\n \"\"\"\n qt = QTable(MIXIN_COLS)\n names = qt.colnames\n for name in names:\n qt[name].info.description = name\n\n t = Table(qt)\n for name in names:\n assert t[name].info.description == name\n if name == 'quantity':\n assert np.all(t['quantity'] == qt['quantity'].value)\n assert 
np.all(t['quantity'].unit is qt['quantity'].unit)\n assert isinstance(t['quantity'], t.ColumnClass)\n else:\n assert_table_name_col_equal(t, name, qt[name])\n\n qt2 = QTable(qt)\n for name in names:\n assert qt2[name].info.description == name\n assert_table_name_col_equal(qt2, name, qt[name])\n\n\ndef test_setitem_as_column_name():\n \"\"\"\n Test for mixin-related regression described in #3321.\n \"\"\"\n t = Table()\n t['a'] = ['x', 'y']\n t['b'] = 'b' # Previously was failing with KeyError\n assert np.all(t['a'] == ['x', 'y'])\n assert np.all(t['b'] == ['b', 'b'])\n\n\ndef test_quantity_representation():\n \"\"\"\n Test that table representation of quantities does not have unit\n \"\"\"\n t = QTable([[1, 2] * u.m])\n assert t.pformat() == ['col0',\n ' m ',\n '----',\n ' 1.0',\n ' 2.0']\n\n\ndef test_representation_representation():\n \"\"\"\n Test that Representations are represented correctly.\n \"\"\"\n # With no unit we get \"None\" in the unit row\n c = coordinates.CartesianRepresentation([0], [1], [0], unit=u.one)\n t = Table([c])\n assert t.pformat() == [' col0 ',\n '------------',\n '(0., 1., 0.)']\n\n c = coordinates.CartesianRepresentation([0], [1], [0], unit='m')\n t = Table([c])\n assert t.pformat() == [' col0 ',\n ' m ',\n '------------',\n '(0., 1., 0.)']\n\n c = coordinates.SphericalRepresentation([10]*u.deg, [20]*u.deg, [1]*u.pc)\n t = Table([c])\n assert t.pformat() == [' col0 ',\n ' deg, deg, pc ',\n '--------------',\n '(10., 20., 1.)']\n\n c = coordinates.UnitSphericalRepresentation([10]*u.deg, [20]*u.deg)\n t = Table([c])\n assert t.pformat() == [' col0 ',\n ' deg ',\n '----------',\n '(10., 20.)']\n\n c = coordinates.SphericalCosLatDifferential(\n [10]*u.mas/u.yr, [2]*u.mas/u.yr, [10]*u.km/u.s)\n t = Table([c])\n assert t.pformat() == [' col0 ',\n 'mas / yr, mas / yr, km / s',\n '--------------------------',\n ' (10., 2., 10.)']\n\n\ndef test_skycoord_representation():\n \"\"\"\n Test that skycoord representation works, both in the way that 
the\n values are output and in changing the frame representation.\n \"\"\"\n # With no unit we get \"None\" in the unit row\n c = coordinates.SkyCoord([0], [1], [0], representation_type='cartesian')\n t = Table([c])\n assert t.pformat() == [' col0 ',\n 'None,None,None',\n '--------------',\n ' 0.0,1.0,0.0']\n\n # Test that info works with a dynamically changed representation\n c = coordinates.SkyCoord([0], [1], [0], unit='m', representation_type='cartesian')\n t = Table([c])\n assert t.pformat() == [' col0 ',\n ' m,m,m ',\n '-----------',\n '0.0,1.0,0.0']\n\n t['col0'].representation_type = 'unitspherical'\n assert t.pformat() == [' col0 ',\n 'deg,deg ',\n '--------',\n '90.0,0.0']\n\n t['col0'].representation_type = 'cylindrical'\n assert t.pformat() == [' col0 ',\n ' m,deg,m ',\n '------------',\n '1.0,90.0,0.0']\n\n\n@pytest.mark.parametrize('as_ndarray_mixin', [True, False])\ndef test_ndarray_mixin(as_ndarray_mixin):\n \"\"\"\n Test directly adding various forms of structured ndarray columns to a table.\n Adding as NdarrayMixin is expected to be somewhat unusual after #12644\n (which provides full support for structured array Column's). This test shows\n that the end behavior is the same in both cases.\n \"\"\"\n a = np.array([(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')],\n dtype='',\n '',\n '',\n '',\n '',\n '
col0col1col2
int64[2]int64[2]int64[2]
1 .. 23 .. 45 .. 6
10 .. 2030 .. 4050 .. 60
']\n nbclass = table.conf.default_notebook_table_class\n masked = 'masked=True ' if t.masked else ''\n assert t._repr_html_().splitlines() == [\n f'
{table_type.__name__} {masked}length=2',\n f'',\n '',\n '',\n '',\n '',\n '
col0col1col2
int64[2]int64[2]int64[2]
1 .. 23 .. 45 .. 6
10 .. 2030 .. 4050 .. 60
']\n\n t = table_type([arr])\n lines = t.pformat(show_dtype=True)\n assert lines == [' col0 ',\n 'int64[2,2]',\n '----------',\n ' 1 .. 20',\n ' 3 .. 40',\n ' 5 .. 60']\n\n def test_fake_multidim(self, table_type):\n \"\"\"Test printing with 'fake' multidimensional column\"\"\"\n arr = [np.array([[(1,)],\n [(10,)]], dtype=np.int64),\n np.array([[(3,)],\n [(30,)]], dtype=np.int64),\n np.array([[(5,)],\n [(50,)]], dtype=np.int64)]\n t = table_type(arr)\n lines = t.pformat(show_dtype=True)\n assert lines == [\n \" col0 col1 col2 \",\n \"int64[1,1] int64[1,1] int64[1,1]\",\n \"---------- ---------- ----------\",\n \" 1 3 5\",\n \" 10 30 50\"]\n\n lines = t.pformat(html=True, show_dtype=True)\n assert lines == [\n f'',\n '',\n '',\n '',\n '',\n '
col0col1col2
int64[1,1]int64[1,1]int64[1,1]
135
103050
']\n nbclass = table.conf.default_notebook_table_class\n masked = 'masked=True ' if t.masked else ''\n assert t._repr_html_().splitlines() == [\n f'
{table_type.__name__} {masked}length=2',\n f'',\n '',\n '',\n '',\n '',\n '
col0col1col2
int64[1,1]int64[1,1]int64[1,1]
135
103050
']\n\n t = table_type([arr])\n lines = t.pformat(show_dtype=True)\n assert lines == [' col0 ',\n 'int64[2,1,1]',\n '------------',\n ' 1 .. 10',\n ' 3 .. 30',\n ' 5 .. 50']\n\n\ndef test_html_escaping():\n t = table.Table([('', 2, 3)])\n nbclass = table.conf.default_notebook_table_class\n assert t._repr_html_().splitlines() == [\n '
Table length=3',\n f'',\n '',\n '',\n '',\n '',\n '',\n '
col0
str33
&lt;script&gt;alert(&quot;gotcha&quot;);&lt;/script&gt;
2
3
']\n\n\n@pytest.mark.usefixtures('table_type')\nclass TestPprint():\n\n def _setup(self, table_type):\n self.tb = table_type(BIG_WIDE_ARR)\n self.tb['col0'].format = 'e'\n self.tb['col1'].format = '.6f'\n\n self.tb['col0'].unit = 'km**2'\n self.tb['col19'].unit = 'kg s m**-2'\n self.ts = table_type(SMALL_ARR)\n\n def test_empty_table(self, table_type):\n t = table_type()\n lines = t.pformat()\n assert lines == ['']\n c = repr(t)\n masked = 'masked=True ' if t.masked else ''\n assert c.splitlines() == [f'<{table_type.__name__} {masked}length=0>',\n '']\n\n def test_format0(self, table_type):\n \"\"\"Try getting screen size but fail to defaults because testing doesn't\n have access to screen (fcntl.ioctl fails).\n \"\"\"\n self._setup(table_type)\n arr = np.arange(4000, dtype=np.float64).reshape(100, 40)\n lines = table_type(arr).pformat()\n nlines, width = console.terminal_size()\n assert len(lines) == nlines\n for line in lines[:-1]: # skip last \"Length = .. rows\" line\n assert width - 10 < len(line) <= width\n\n def test_format1(self, table_type):\n \"\"\"Basic test of formatting, unit header row included\"\"\"\n self._setup(table_type)\n lines = self.tb.pformat(max_lines=8, max_width=40)\n assert lines == [' col0 col1 ... col19 ',\n ' km2 ... kg s / m2',\n '------------ ----------- ... ---------',\n '0.000000e+00 1.000000 ... 19.0',\n ' ... ... ... ...',\n '1.960000e+03 1961.000000 ... 1979.0',\n '1.980000e+03 1981.000000 ... 1999.0',\n 'Length = 100 rows']\n\n def test_format2(self, table_type):\n \"\"\"Basic test of formatting, unit header row excluded\"\"\"\n self._setup(table_type)\n lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=False)\n assert lines == [' col0 col1 ... col19 ',\n '------------ ----------- ... ------',\n '0.000000e+00 1.000000 ... 19.0',\n '2.000000e+01 21.000000 ... 39.0',\n ' ... ... ... ...',\n '1.960000e+03 1961.000000 ... 1979.0',\n '1.980000e+03 1981.000000 ... 
1999.0',\n 'Length = 100 rows']\n\n def test_format3(self, table_type):\n \"\"\"Include the unit header row\"\"\"\n self._setup(table_type)\n lines = self.tb.pformat(max_lines=8, max_width=40, show_unit=True)\n\n assert lines == [' col0 col1 ... col19 ',\n ' km2 ... kg s / m2',\n '------------ ----------- ... ---------',\n '0.000000e+00 1.000000 ... 19.0',\n ' ... ... ... ...',\n '1.960000e+03 1961.000000 ... 1979.0',\n '1.980000e+03 1981.000000 ... 1999.0',\n 'Length = 100 rows']\n\n def test_format4(self, table_type):\n \"\"\"Do not include the name header row\"\"\"\n self._setup(table_type)\n lines = self.tb.pformat(max_lines=8, max_width=40, show_name=False)\n assert lines == [' km2 ... kg s / m2',\n '------------ ----------- ... ---------',\n '0.000000e+00 1.000000 ... 19.0',\n '2.000000e+01 21.000000 ... 39.0',\n ' ... ... ... ...',\n '1.960000e+03 1961.000000 ... 1979.0',\n '1.980000e+03 1981.000000 ... 1999.0',\n 'Length = 100 rows']\n\n def test_noclip(self, table_type):\n \"\"\"Basic table print\"\"\"\n self._setup(table_type)\n lines = self.ts.pformat(max_lines=-1, max_width=-1)\n assert lines == ['col0 col1 col2',\n '---- ---- ----',\n ' 0 1 2',\n ' 3 4 5',\n ' 6 7 8',\n ' 9 10 11',\n ' 12 13 14',\n ' 15 16 17']\n\n def test_clip1(self, table_type):\n \"\"\"max lines below hard limit of 8\n \"\"\"\n self._setup(table_type)\n lines = self.ts.pformat(max_lines=3, max_width=-1)\n assert lines == ['col0 col1 col2',\n '---- ---- ----',\n ' 0 1 2',\n ' 3 4 5',\n ' 6 7 8',\n ' 9 10 11',\n ' 12 13 14',\n ' 15 16 17']\n\n def test_clip2(self, table_type):\n \"\"\"max lines below hard limit of 8 and output longer than 8\n \"\"\"\n self._setup(table_type)\n lines = self.ts.pformat(max_lines=3, max_width=-1, show_unit=True, show_dtype=True)\n assert lines == [' col0 col1 col2',\n ' ',\n 'int64 int64 int64',\n '----- ----- -----',\n ' 0 1 2',\n ' ... ... 
...',\n ' 15 16 17',\n 'Length = 6 rows']\n\n def test_clip3(self, table_type):\n \"\"\"Max lines below hard limit of 8 and max width below hard limit\n of 10\n \"\"\"\n self._setup(table_type)\n lines = self.ts.pformat(max_lines=3, max_width=1, show_unit=True)\n assert lines == ['col0 ...',\n ' ...',\n '---- ...',\n ' 0 ...',\n ' ... ...',\n ' 12 ...',\n ' 15 ...',\n 'Length = 6 rows']\n\n def test_clip4(self, table_type):\n \"\"\"Test a range of max_lines\"\"\"\n self._setup(table_type)\n for max_lines in (0, 1, 4, 5, 6, 7, 8, 100, 101, 102, 103, 104, 130):\n lines = self.tb.pformat(max_lines=max_lines, show_unit=False)\n assert len(lines) == max(8, min(102, max_lines))\n\n def test_pformat_all(self, table_type):\n \"\"\"Test that all rows are printed by default\"\"\"\n self._setup(table_type)\n lines = self.tb.pformat_all()\n # +3 accounts for the three header lines in this table\n assert len(lines) == BIG_WIDE_ARR.shape[0] + 3\n\n @pytest.fixture\n def test_pprint_all(self, table_type, capsys):\n \"\"\"Test that all rows are printed by default\"\"\"\n self._setup(table_type)\n self.tb.pprint_all()\n (out, err) = capsys.readouterr()\n # +3 accounts for the three header lines in this table\n assert len(out) == BIG_WIDE_ARR.shape[0] + 3\n\n\n@pytest.mark.usefixtures('table_type')\nclass TestFormat():\n\n def test_column_format(self, table_type):\n t = table_type([[1, 2], [3, 4]], names=('a', 'b'))\n # default (format=None)\n assert str(t['a']) == ' a \\n---\\n 1\\n 2'\n\n # just a plain format string\n t['a'].format = '5.2f'\n assert str(t['a']) == ' a \\n-----\\n 1.00\\n 2.00'\n\n # Old-style that is almost new-style\n t['a'].format = '{ %4.2f }'\n assert str(t['a']) == ' a \\n--------\\n{ 1.00 }\\n{ 2.00 }'\n\n # New-style that is almost old-style\n t['a'].format = '%{0:}'\n assert str(t['a']) == ' a \\n---\\n %1\\n %2'\n\n # New-style with extra spaces\n t['a'].format = ' {0:05d} '\n assert str(t['a']) == ' a \\n-------\\n 00001 \\n 00002 '\n\n # New-style has 
precedence\n t['a'].format = '%4.2f {0:}'\n assert str(t['a']) == ' a \\n-------\\n%4.2f 1\\n%4.2f 2'\n\n # Invalid format spec\n with pytest.raises(ValueError):\n t['a'].format = 'fail'\n assert t['a'].format == '%4.2f {0:}' # format did not change\n\n def test_column_format_with_threshold(self, table_type):\n from astropy import conf\n with conf.set_temp('max_lines', 8):\n t = table_type([np.arange(20)], names=['a'])\n t['a'].format = '%{0:}'\n assert str(t['a']).splitlines() == [' a ',\n '---',\n ' %0',\n ' %1',\n '...',\n '%18',\n '%19',\n 'Length = 20 rows']\n t['a'].format = '{ %4.2f }'\n assert str(t['a']).splitlines() == [' a ',\n '---------',\n ' { 0.00 }',\n ' { 1.00 }',\n ' ...',\n '{ 18.00 }',\n '{ 19.00 }',\n 'Length = 20 rows']\n\n def test_column_format_func(self, table_type):\n # run most of functions twice\n # 1) astropy.table.pprint._format_funcs gets populated\n # 2) astropy.table.pprint._format_funcs gets used\n\n t = table_type([[1., 2.], [3, 4]], names=('a', 'b'))\n\n # mathematical function\n t['a'].format = lambda x: str(x * 3.)\n assert str(t['a']) == ' a \\n---\\n3.0\\n6.0'\n assert str(t['a']) == ' a \\n---\\n3.0\\n6.0'\n\n def test_column_format_callable(self, table_type):\n # run most of functions twice\n # 1) astropy.table.pprint._format_funcs gets populated\n # 2) astropy.table.pprint._format_funcs gets used\n\n t = table_type([[1., 2.], [3, 4]], names=('a', 'b'))\n\n # mathematical function\n class format:\n def __call__(self, x):\n return str(x * 3.)\n t['a'].format = format()\n assert str(t['a']) == ' a \\n---\\n3.0\\n6.0'\n assert str(t['a']) == ' a \\n---\\n3.0\\n6.0'\n\n def test_column_format_func_wrong_number_args(self, table_type):\n t = table_type([[1., 2.], [3, 4]], names=('a', 'b'))\n\n # function that expects wrong number of arguments\n def func(a, b):\n pass\n\n with pytest.raises(ValueError):\n t['a'].format = func\n\n def test_column_format_func_multiD(self, table_type):\n arr = [np.array([[1, 2],\n [10, 20]], 
dtype='i8')]\n t = table_type(arr, names=['a'])\n\n # mathematical function\n t['a'].format = lambda x: str(x * 3.)\n outstr = (' a \\n'\n '------------\\n'\n ' 3.0 .. 6.0\\n'\n '30.0 .. 60.0')\n assert str(t['a']) == outstr\n\n def test_column_format_func_not_str(self, table_type):\n t = table_type([[1., 2.], [3, 4]], names=('a', 'b'))\n\n # mathematical function\n with pytest.raises(ValueError):\n t['a'].format = lambda x: x * 3\n\n def test_column_alignment(self, table_type):\n t = table_type([[1], [2], [3], [4]],\n names=('long title a', 'long title b',\n 'long title c', 'long title d'))\n t['long title a'].format = '<'\n t['long title b'].format = '^'\n t['long title c'].format = '>'\n t['long title d'].format = '0='\n assert str(t['long title a']) == 'long title a\\n------------\\n1 '\n assert str(t['long title b']) == 'long title b\\n------------\\n 2 '\n assert str(t['long title c']) == 'long title c\\n------------\\n 3'\n assert str(t['long title d']) == 'long title d\\n------------\\n000000000004'\n\n\nclass TestFormatWithMaskedElements():\n\n def test_column_format(self):\n t = Table([[1, 2, 3], [3, 4, 5]], names=('a', 'b'), masked=True)\n t['a'].mask = [True, False, True]\n # default (format=None)\n assert str(t['a']) == ' a \\n---\\n --\\n 2\\n --'\n\n # just a plain format string\n t['a'].format = '5.2f'\n assert str(t['a']) == ' a \\n-----\\n --\\n 2.00\\n --'\n\n # Old-style that is almost new-style\n t['a'].format = '{ %4.2f }'\n assert str(t['a']) == ' a \\n--------\\n --\\n{ 2.00 }\\n --'\n\n # New-style that is almost old-style\n t['a'].format = '%{0:}'\n assert str(t['a']) == ' a \\n---\\n --\\n %2\\n --'\n\n # New-style with extra spaces\n t['a'].format = ' {0:05d} '\n assert str(t['a']) == ' a \\n-------\\n --\\n 00002 \\n --'\n\n # New-style has precedence\n t['a'].format = '%4.2f {0:}'\n assert str(t['a']) == ' a \\n-------\\n --\\n%4.2f 2\\n --'\n\n def test_column_format_with_threshold_masked_table(self):\n from astropy import conf\n with 
conf.set_temp('max_lines', 8):\n t = Table([np.arange(20)], names=['a'], masked=True)\n t['a'].format = '%{0:}'\n t['a'].mask[0] = True\n t['a'].mask[-1] = True\n assert str(t['a']).splitlines() == [' a ',\n '---',\n ' --',\n ' %1',\n '...',\n '%18',\n ' --',\n 'Length = 20 rows']\n t['a'].format = '{ %4.2f }'\n assert str(t['a']).splitlines() == [' a ',\n '---------',\n ' --',\n ' { 1.00 }',\n ' ...',\n '{ 18.00 }',\n ' --',\n 'Length = 20 rows']\n\n def test_column_format_func(self):\n # run most of functions twice\n # 1) astropy.table.pprint._format_funcs gets populated\n # 2) astropy.table.pprint._format_funcs gets used\n\n t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True)\n t['a'].mask = [True, False, True]\n # mathematical function\n t['a'].format = lambda x: str(x * 3.)\n assert str(t['a']) == ' a \\n---\\n --\\n6.0\\n --'\n assert str(t['a']) == ' a \\n---\\n --\\n6.0\\n --'\n\n def test_column_format_func_with_special_masked(self):\n # run most of functions twice\n # 1) astropy.table.pprint._format_funcs gets populated\n # 2) astropy.table.pprint._format_funcs gets used\n\n t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True)\n t['a'].mask = [True, False, True]\n # mathematical function\n\n def format_func(x):\n if x is np.ma.masked:\n return '!!'\n else:\n return str(x * 3.)\n t['a'].format = format_func\n assert str(t['a']) == ' a \\n---\\n !!\\n6.0\\n !!'\n assert str(t['a']) == ' a \\n---\\n !!\\n6.0\\n !!'\n\n def test_column_format_callable(self):\n # run most of functions twice\n # 1) astropy.table.pprint._format_funcs gets populated\n # 2) astropy.table.pprint._format_funcs gets used\n\n t = Table([[1., 2., 3.], [3, 4, 5]], names=('a', 'b'), masked=True)\n t['a'].mask = [True, False, True]\n\n # mathematical function\n class format:\n def __call__(self, x):\n return str(x * 3.)\n t['a'].format = format()\n assert str(t['a']) == ' a \\n---\\n --\\n6.0\\n --'\n assert str(t['a']) == ' a \\n---\\n --\\n6.0\\n --'\n\n 
def test_column_format_func_wrong_number_args(self):\n t = Table([[1., 2.], [3, 4]], names=('a', 'b'), masked=True)\n t['a'].mask = [True, False]\n\n # function that expects wrong number of arguments\n def func(a, b):\n pass\n\n with pytest.raises(ValueError):\n t['a'].format = func\n\n # but if all are masked, it never gets called\n t['a'].mask = [True, True]\n assert str(t['a']) == ' a \\n---\\n --\\n --'\n\n def test_column_format_func_multiD(self):\n arr = [np.array([[1, 2],\n [10, 20]], dtype='i8')]\n t = Table(arr, names=['a'], masked=True)\n t['a'].mask[0, 1] = True\n t['a'].mask[1, 1] = True\n # mathematical function\n t['a'].format = lambda x: str(x * 3.)\n outstr = (' a \\n'\n '----------\\n'\n ' 3.0 .. --\\n'\n '30.0 .. --')\n assert str(t['a']) == outstr\n assert str(t['a']) == outstr\n\n\ndef test_pprint_npfloat32():\n \"\"\"\n Test for #148, that np.float32 cannot by itself be formatted as float,\n but has to be converted to a python float.\n \"\"\"\n dat = np.array([1., 2.], dtype=np.float32)\n t = Table([dat], names=['a'])\n t['a'].format = '5.2f'\n assert str(t['a']) == ' a \\n-----\\n 1.00\\n 2.00'\n\n\ndef test_pprint_py3_bytes():\n \"\"\"\n Test for #1346 and #4944. 
Make sure a bytestring (dtype=S) in Python 3\n is printed correctly (without the \"b\" prefix like b'string').\n \"\"\"\n val = bytes('val', encoding='utf-8')\n blah = 'bläh'.encode()\n dat = np.array([val, blah], dtype=[('col', 'S10')])\n t = table.Table(dat)\n assert t['col'].pformat() == ['col ', '----', ' val', 'bläh']\n\n\ndef test_pprint_structured():\n su = table.Column([(1, (1.5, [1.6, 1.7])),\n (2, (2.5, [2.6, 2.7]))],\n name='su',\n dtype=[('i', np.int64),\n ('f', [('p0', np.float64), ('p1', np.float64, (2,))])])\n assert su.pformat() == [\n \" su [i, f[p0, p1]] \",\n \"----------------------\",\n \"(1, (1.5, [1.6, 1.7]))\",\n \"(2, (2.5, [2.6, 2.7]))\"]\n t = table.Table([su])\n assert t.pformat() == su.pformat()\n assert repr(t).splitlines() == [\n \"\",\n \" su [i, f[p0, p1]] \",\n \"(int64, (float64, float64[2]))\",\n \"------------------------------\",\n \" (1, (1.5, [1.6, 1.7]))\",\n \" (2, (2.5, [2.6, 2.7]))\"]\n\n\ndef test_pprint_structured_with_format():\n dtype = np.dtype([('par', 'f8'), ('min', 'f8'), ('id', 'i4'), ('name', 'U4')])\n c = table.Column([(1.2345678, -20, 3, 'bar'),\n (12.345678, 4.5678, 33, 'foo')], dtype=dtype)\n t = table.Table()\n t['a'] = [1, 2]\n t['c'] = c\n t['c'].info.format = '{par:6.2f} {min:5.1f} {id:03d} {name:4s}'\n exp = [\n ' a c [par, min, id, name]',\n '--- ----------------------',\n ' 1 1.23 -20.0 003 bar ',\n ' 2 12.35 4.6 033 foo ']\n assert t.pformat_all() == exp\n\n\ndef test_pprint_nameless_col():\n \"\"\"Regression test for #2213, making sure a nameless column can be printed\n using None as the name.\n \"\"\"\n col = table.Column([1., 2.])\n assert str(col).startswith('None')\n\n\ndef test_html():\n \"\"\"Test HTML printing\"\"\"\n dat = np.array([1., 2.], dtype=np.float32)\n t = Table([dat], names=['a'])\n\n lines = t.pformat(html=True)\n assert lines == [f'
',\n '',\n '',\n '',\n '
a
1.0
2.0
']\n\n lines = t.pformat(html=True, tableclass='table-striped')\n assert lines == [\n f'',\n '',\n '',\n '',\n '
a
1.0
2.0
']\n\n lines = t.pformat(html=True, tableclass=['table', 'table-striped'])\n assert lines == [\n f'',\n '',\n '',\n '',\n '
a
1.0
2.0
']\n\n\ndef test_align():\n t = simple_table(2, kinds='iS')\n assert t.pformat() == [' a b ',\n '--- ---',\n ' 1 b',\n ' 2 c']\n # Use column format attribute\n t['a'].format = '<'\n assert t.pformat() == [' a b ',\n '--- ---',\n '1 b',\n '2 c']\n\n # Now override column format attribute with various combinations of align\n tpf = [' a b ',\n '--- ---',\n ' 1 b ',\n ' 2 c ']\n for align in ('^', ['^', '^'], ('^', '^')):\n assert tpf == t.pformat(align=align)\n\n assert t.pformat(align='<') == [' a b ',\n '--- ---',\n '1 b ',\n '2 c ']\n assert t.pformat(align='0=') == [' a b ',\n '--- ---',\n '001 00b',\n '002 00c']\n\n assert t.pformat(align=['<', '^']) == [' a b ',\n '--- ---',\n '1 b ',\n '2 c ']\n\n # Now use fill characters. Stress the system using a fill\n # character that is the same as an align character.\n t = simple_table(2, kinds='iS')\n\n assert t.pformat(align='^^') == [' a b ',\n '--- ---',\n '^1^ ^b^',\n '^2^ ^c^']\n\n assert t.pformat(align='^>') == [' a b ',\n '--- ---',\n '^^1 ^^b',\n '^^2 ^^c']\n\n assert t.pformat(align='^<') == [' a b ',\n '--- ---',\n '1^^ b^^',\n '2^^ c^^']\n\n # Complicated interaction (same as narrative docs example)\n t1 = Table([[1.0, 2.0], [1, 2]], names=['column1', 'column2'])\n t1['column1'].format = '#^.2f'\n\n assert t1.pformat() == ['column1 column2',\n '------- -------',\n '##1.00# 1',\n '##2.00# 2']\n\n assert t1.pformat(align='!<') == ['column1 column2',\n '------- -------',\n '1.00!!! 1!!!!!!',\n '2.00!!! 
2!!!!!!']\n\n assert t1.pformat(align=[None, '!<']) == ['column1 column2',\n '------- -------',\n '##1.00# 1!!!!!!',\n '##2.00# 2!!!!!!']\n\n # Zero fill\n t['a'].format = '+d'\n assert t.pformat(align='0=') == [' a b ',\n '--- ---',\n '+01 00b',\n '+02 00c']\n\n with pytest.raises(ValueError):\n t.pformat(align=['fail'])\n\n with pytest.raises(TypeError):\n t.pformat(align=0)\n\n with pytest.raises(TypeError):\n t.pprint(align=0)\n\n # Make sure pprint() does not raise an exception\n t.pprint()\n\n with pytest.raises(ValueError):\n t.pprint(align=['<', '<', '<'])\n\n with pytest.raises(ValueError):\n t.pprint(align='x=')\n\n\ndef test_auto_format_func():\n \"\"\"Test for #5802 (fix for #5800 where format_func key is not unique)\"\"\"\n t = Table([[1, 2] * u.m])\n t['col0'].format = '%f'\n t.pformat() # Force caching of format function\n\n qt = QTable(t)\n qt.pformat() # Generates exception prior to #5802\n\n\ndef test_decode_replace():\n \"\"\"\n Test printing a bytestring column with a value that fails\n decoding to utf-8 and gets replaced by U+FFFD. 
See\n https://docs.python.org/3/library/codecs.html#codecs.replace_errors\n \"\"\"\n t = Table([[b'Z\\xf0']])\n assert t.pformat() == ['col0', '----', ' Z\\ufffd']\n\n\nclass TestColumnsShowHide:\n \"\"\"Tests of show and hide table columns\"\"\"\n def setup_method(self):\n self.t = simple_table(size=1, cols=4, kinds='i')\n\n @pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names'))\n def test_basic(self, attr):\n t = self.t\n assert repr(getattr(Table, attr)) == f''\n\n t_show_hide = getattr(t, attr)\n assert repr(t_show_hide) == f''\n\n # Default value is None\n assert t_show_hide() is None\n\n def test_slice(self):\n t = self.t\n t.pprint_include_names = 'a'\n t.pprint_exclude_names = 'b'\n t2 = t[0:1]\n assert t2.pprint_include_names() == ('a',)\n assert t2.pprint_exclude_names() == ('b',)\n\n def test_copy(self):\n t = self.t\n t.pprint_include_names = 'a'\n t.pprint_exclude_names = 'b'\n\n t2 = t.copy()\n assert t2.pprint_include_names() == ('a',)\n assert t2.pprint_exclude_names() == ('b',)\n\n t2.pprint_include_names = 'c'\n t2.pprint_exclude_names = 'd'\n assert t.pprint_include_names() == ('a',)\n assert t.pprint_exclude_names() == ('b',)\n assert t2.pprint_include_names() == ('c',)\n assert t2.pprint_exclude_names() == ('d',)\n\n @pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names'))\n @pytest.mark.parametrize('value', ('z', ['a', 'z']))\n def test_setting(self, attr, value):\n t = self.t\n t_show_hide = getattr(t, attr)\n\n # Expected attribute value ('z',) or ('a', 'z')\n exp = (value,) if isinstance(value, str) else tuple(value)\n\n # Context manager, can include column names that do not exist\n with t_show_hide.set(value):\n assert t_show_hide() == exp\n assert t.meta['__attributes__'] == {attr: exp}\n assert t_show_hide() is None\n\n # Setting back to None clears out meta\n assert t.meta == {}\n\n # Do `t.pprint_include_names/hide = value`\n setattr(t, attr, value)\n assert t_show_hide() == 
exp\n\n # Clear attribute\n t_show_hide.set(None)\n assert t_show_hide() is None\n\n # Now use set() method\n t_show_hide.set(value)\n assert t_show_hide() == exp\n\n with t_show_hide.set(None):\n assert t_show_hide() is None\n assert t.meta == {}\n assert t_show_hide() == exp\n\n @pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names'))\n @pytest.mark.parametrize('value', ('z', ['a', 'z'], ('a', 'z')))\n def test_add_remove(self, attr, value):\n t = self.t\n t_show_hide = getattr(t, attr)\n\n # Expected attribute value ('z') or ('a', 'z')\n exp = (value,) if isinstance(value, str) else tuple(value)\n\n # add() method for str or list of str\n t_show_hide.add(value)\n assert t_show_hide() == exp\n\n # Adding twice has no effect\n t_show_hide.add(value)\n assert t_show_hide() == exp\n\n # Remove values (str or list of str). Reverts to None if all names are\n # removed.\n t_show_hide.remove(value)\n assert t_show_hide() is None\n\n # Remove just one name, possibly leaving a name.\n t_show_hide.add(value)\n t_show_hide.remove('z')\n assert t_show_hide() == (None if value == 'z' else ('a',))\n\n # Cannot remove name not in the list\n t_show_hide.set(['a', 'z'])\n with pytest.raises(ValueError, match=f'x not in {attr}'):\n t_show_hide.remove(('x', 'z'))\n\n @pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names'))\n def test_rename(self, attr):\n t = self.t\n t_hide_show = getattr(t, attr)\n t_hide_show.set(['a', 'b'])\n t.rename_column('a', 'aa')\n assert t_hide_show() == ('aa', 'b')\n\n @pytest.mark.parametrize('attr', ('pprint_exclude_names', 'pprint_include_names'))\n def test_remove(self, attr):\n t = self.t\n t_hide_show = getattr(t, attr)\n t_hide_show.set(['a', 'b'])\n del t['a']\n assert t_hide_show() == ('b',)\n\n def test_serialization(self):\n # Serialization works for ECSV. 
Currently fails for FITS, works with\n # HDF5.\n t = self.t\n t.pprint_exclude_names = ['a', 'y']\n t.pprint_include_names = ['b', 'z']\n\n out = StringIO()\n ascii.write(t, out, format='ecsv')\n t2 = ascii.read(out.getvalue(), format='ecsv')\n\n assert t2.pprint_exclude_names() == ('a', 'y')\n assert t2.pprint_include_names() == ('b', 'z')\n\n def test_output(self):\n \"\"\"Test that pprint_include/exclude_names actually changes the print output\"\"\"\n t = self.t\n exp = [' b d ',\n '--- ---',\n ' 2 4']\n\n with t.pprint_exclude_names.set(['a', 'c']):\n out = t.pformat_all()\n assert out == exp\n\n with t.pprint_include_names.set(['b', 'd']):\n out = t.pformat_all()\n assert out == exp\n\n with t.pprint_exclude_names.set(['a', 'c']):\n out = t.pformat_all()\n assert out == exp\n\n with t.pprint_include_names.set(['b', 'd']):\n out = t.pformat_all()\n assert out == exp\n\n # Mixture (not common in practice but possible). Note, the trailing\n # backslash instead of parens is needed for Python < 3.9. 
See:\n # https://bugs.python.org/issue12782.\n with t.pprint_include_names.set(['b', 'c', 'd']), \\\n t.pprint_exclude_names.set(['c']):\n out = t.pformat_all()\n assert out == exp\n\n def test_output_globs(self):\n \"\"\"Test that pprint_include/exclude_names works with globs (fnmatch)\"\"\"\n t = self.t\n t['a2'] = 1\n t['a23'] = 2\n\n # Show only the a* columns\n exp = [' a a2 a23',\n '--- --- ---',\n ' 1 1 2']\n with t.pprint_include_names.set('a*'):\n out = t.pformat_all()\n assert out == exp\n\n # Show a* but exclude a??\n exp = [' a a2',\n '--- ---',\n ' 1 1']\n with t.pprint_include_names.set('a*'), t.pprint_exclude_names.set('a??'):\n out = t.pformat_all()\n assert out == exp\n\n # Exclude a??\n exp = [' a b c d a2',\n '--- --- --- --- ---',\n ' 1 2 3 4 1']\n with t.pprint_exclude_names.set('a??'):\n out = t.pformat_all()\n assert out == exp\n\n\ndef test_embedded_newline_tab():\n \"\"\"Newlines and tabs are escaped in table repr\"\"\"\n t = Table(rows=[['a', 'b \\n c \\t \\n d'], ['x', 'y\\n']])\n exp = [\n r'col0 col1 ',\n r'---- --------------',\n r' a b \\n c \\t \\n d',\n r' x y\\n']\n assert t.pformat_all() == exp\n"}}},{"rowIdx":1356,"cells":{"hash":{"kind":"string","value":"8fbec438884abdae59875fec7f5c73a67b2e83e264ef2f9b3360eb519795fbe5"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\n\nfrom astropy.table.bst import BST\n\n\ndef get_tree(TreeType):\n b = TreeType([], [])\n for val in [5, 2, 9, 3, 4, 1, 6, 10, 8, 7]:\n b.add(val)\n return b\n\n\n@pytest.fixture\ndef tree():\n return get_tree(BST)\n r'''\n 5\n / \\\n 2 9\n / \\ / \\\n 1 3 6 10\n \\ \\\n 4 8\n /\n 7\n '''\n\n\n@pytest.fixture\ndef bst(tree):\n return tree\n\n\ndef test_bst_add(bst):\n root = bst.root\n assert root.data == [5]\n assert root.left.data == [2]\n assert root.right.data == [9]\n assert root.left.left.data == [1]\n assert root.left.right.data == [3]\n assert root.right.left.data == [6]\n assert 
root.right.right.data == [10]\n assert root.left.right.right.data == [4]\n assert root.right.left.right.data == [8]\n assert root.right.left.right.left.data == [7]\n\n\ndef test_bst_dimensions(bst):\n assert bst.size == 10\n assert bst.height == 4\n\n\ndef test_bst_find(tree):\n bst = tree\n for i in range(1, 11):\n node = bst.find(i)\n assert node == [i]\n assert bst.find(0) == []\n assert bst.find(11) == []\n assert bst.find('1') == []\n\n\ndef test_bst_traverse(bst):\n preord = [5, 2, 1, 3, 4, 9, 6, 8, 7, 10]\n inord = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n postord = [1, 4, 3, 2, 7, 8, 6, 10, 9, 5]\n traversals = {}\n for order in ('preorder', 'inorder', 'postorder'):\n traversals[order] = [x.key for x in bst.traverse(order)]\n assert traversals['preorder'] == preord\n assert traversals['inorder'] == inord\n assert traversals['postorder'] == postord\n\n\ndef test_bst_remove(bst):\n order = (6, 9, 1, 3, 7, 2, 10, 5, 4, 8)\n vals = set(range(1, 11))\n for i, val in enumerate(order):\n assert bst.remove(val) is True\n assert bst.is_valid()\n assert {x.key for x in bst.traverse('inorder')} == \\\n vals.difference(order[:i + 1])\n assert bst.size == 10 - i - 1\n assert bst.remove(-val) is False\n\n\ndef test_bst_duplicate(bst):\n bst.add(10, 11)\n assert bst.find(10) == [10, 11]\n assert bst.remove(10, data=10) is True\n assert bst.find(10) == [11]\n with pytest.raises(ValueError):\n bst.remove(10, data=30) # invalid data\n assert bst.remove(10) is True\n assert bst.remove(10) is False\n\n\ndef test_bst_range(tree):\n bst = tree\n lst = bst.range_nodes(4, 8)\n assert sorted(x.key for x in lst) == [4, 5, 6, 7, 8]\n lst = bst.range_nodes(10, 11)\n assert [x.key for x in lst] == [10]\n lst = bst.range_nodes(11, 20)\n assert len(lst) == 0\n"}}},{"rowIdx":1357,"cells":{"hash":{"kind":"string","value":"c9669ce4699102b98897ed1e4a2276544fa6addced52988b6f7a59cdc309279b"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport 
itertools\n\nimport pytest\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_almost_equal\n\nfrom astropy import units as u\nfrom astropy.convolution.convolve import convolve, convolve_fft\nfrom astropy.convolution.kernels import (Box2DKernel, Gaussian2DKernel,\n Moffat2DKernel, Tophat2DKernel)\nfrom astropy.utils.exceptions import AstropyDeprecationWarning\n\nSHAPES_ODD = [[15, 15], [31, 31]]\nSHAPES_EVEN = [[8, 8], [16, 16], [32, 32]] # FIXME: not used ?!\nNOSHAPE = [[None, None]]\nWIDTHS = [2, 3, 4, 5]\n\nKERNELS = []\n\nfor shape in SHAPES_ODD + NOSHAPE:\n for width in WIDTHS:\n\n KERNELS.append(Gaussian2DKernel(width,\n x_size=shape[0],\n y_size=shape[1],\n mode='oversample',\n factor=10))\n\n KERNELS.append(Box2DKernel(width,\n x_size=shape[0],\n y_size=shape[1],\n mode='oversample',\n factor=10))\n\n KERNELS.append(Tophat2DKernel(width,\n x_size=shape[0],\n y_size=shape[1],\n mode='oversample',\n factor=10))\n KERNELS.append(Moffat2DKernel(width, 2,\n x_size=shape[0],\n y_size=shape[1],\n mode='oversample',\n factor=10))\n\n\nclass Test2DConvolutions:\n\n @pytest.mark.parametrize('kernel', KERNELS)\n def test_centered_makekernel(self, kernel):\n \"\"\"\n Test smoothing of an image with a single positive pixel\n \"\"\"\n\n shape = kernel.array.shape\n\n x = np.zeros(shape)\n xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)\n x[xslice] = 1.0\n\n c2 = convolve_fft(x, kernel, boundary='fill')\n c1 = convolve(x, kernel, boundary='fill')\n\n assert_almost_equal(c1, c2, decimal=12)\n\n @pytest.mark.parametrize('kernel', KERNELS)\n def test_random_makekernel(self, kernel):\n \"\"\"\n Test smoothing of an image made of random noise\n \"\"\"\n\n shape = kernel.array.shape\n\n x = np.random.randn(*shape)\n\n c2 = convolve_fft(x, kernel, boundary='fill')\n c1 = convolve(x, kernel, boundary='fill')\n\n # not clear why, but these differ by a couple ulps...\n assert_almost_equal(c1, c2, decimal=12)\n\n @pytest.mark.parametrize(('shape', 
'width'), list(itertools.product(SHAPES_ODD, WIDTHS)))\n def test_uniform_smallkernel(self, shape, width):\n \"\"\"\n Test smoothing of an image with a single positive pixel\n\n Uses a simple, small kernel\n \"\"\"\n\n if width % 2 == 0:\n # convolve does not accept odd-shape kernels\n return\n\n kernel = np.ones([width, width])\n\n x = np.zeros(shape)\n xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)\n x[xslice] = 1.0\n\n c2 = convolve_fft(x, kernel, boundary='fill')\n c1 = convolve(x, kernel, boundary='fill')\n\n assert_almost_equal(c1, c2, decimal=12)\n\n @pytest.mark.parametrize(('shape', 'width'), list(itertools.product(SHAPES_ODD, [1, 3, 5])))\n def test_smallkernel_Box2DKernel(self, shape, width):\n \"\"\"\n Test smoothing of an image with a single positive pixel\n\n Compares a small uniform kernel to the Box2DKernel\n \"\"\"\n\n kernel1 = np.ones([width, width]) / float(width) ** 2\n kernel2 = Box2DKernel(width, mode='oversample', factor=10)\n\n x = np.zeros(shape)\n xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)\n x[xslice] = 1.0\n\n c2 = convolve_fft(x, kernel2, boundary='fill')\n c1 = convolve_fft(x, kernel1, boundary='fill')\n\n assert_almost_equal(c1, c2, decimal=12)\n\n c2 = convolve(x, kernel2, boundary='fill')\n c1 = convolve(x, kernel1, boundary='fill')\n\n assert_almost_equal(c1, c2, decimal=12)\n\n\ndef test_gaussian_2d_kernel_quantity():\n # Make sure that the angle can be a quantity\n kernel1 = Gaussian2DKernel(x_stddev=2, y_stddev=4, theta=45 * u.deg)\n kernel2 = Gaussian2DKernel(x_stddev=2, y_stddev=4, theta=np.pi / 4)\n assert_allclose(kernel1.array, kernel2.array)\n"}}},{"rowIdx":1358,"cells":{"hash":{"kind":"string","value":"014f37139b8e0c333e107a21dffcffae1be054109a16871a411cda82246ef5b4"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport itertools\n\nimport pytest\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, 
assert_almost_equal\n\nfrom astropy.convolution.convolve import convolve, convolve_fft\nfrom astropy.convolution.kernels import (AiryDisk2DKernel, Box1DKernel, Box2DKernel, CustomKernel,\n Gaussian1DKernel, Gaussian2DKernel, Kernel1D, Kernel2D,\n Model1DKernel, Model2DKernel, RickerWavelet1DKernel,\n RickerWavelet2DKernel, Ring2DKernel, Tophat2DKernel,\n Trapezoid1DKernel, TrapezoidDisk2DKernel)\nfrom astropy.convolution.utils import KernelSizeError\nfrom astropy.modeling.models import Box2D, Gaussian1D, Gaussian2D\nfrom astropy.utils.compat.optional_deps import HAS_SCIPY # noqa\nfrom astropy.utils.exceptions import AstropyUserWarning\n\nWIDTHS_ODD = [3, 5, 7, 9]\nWIDTHS_EVEN = [2, 4, 8, 16]\nMODES = ['center', 'linear_interp', 'oversample', 'integrate']\nKERNEL_TYPES = [Gaussian1DKernel, Gaussian2DKernel,\n Box1DKernel, Box2DKernel,\n Trapezoid1DKernel, TrapezoidDisk2DKernel,\n RickerWavelet1DKernel, Tophat2DKernel, AiryDisk2DKernel,\n Ring2DKernel]\n\n\nNUMS = [1, 1., np.float32(1.), np.float64(1.)]\n\n\n# Test data\ndelta_pulse_1D = np.zeros(81)\ndelta_pulse_1D[40] = 1\n\ndelta_pulse_2D = np.zeros((81, 81))\ndelta_pulse_2D[40, 40] = 1\n\nrandom_data_1D = np.random.rand(61)\nrandom_data_2D = np.random.rand(61, 61)\n\n\nclass TestKernels:\n \"\"\"\n Test class for the built-in convolution kernels.\n \"\"\"\n\n @pytest.mark.skipif('not HAS_SCIPY')\n @pytest.mark.parametrize(('width'), WIDTHS_ODD)\n def test_scipy_filter_gaussian(self, width):\n \"\"\"\n Test GaussianKernel against SciPy ndimage gaussian filter.\n \"\"\"\n from scipy.ndimage import gaussian_filter\n\n gauss_kernel_1D = Gaussian1DKernel(width)\n gauss_kernel_1D.normalize()\n gauss_kernel_2D = Gaussian2DKernel(width)\n gauss_kernel_2D.normalize()\n\n astropy_1D = convolve(delta_pulse_1D, gauss_kernel_1D, boundary='fill')\n astropy_2D = convolve(delta_pulse_2D, gauss_kernel_2D, boundary='fill')\n\n scipy_1D = gaussian_filter(delta_pulse_1D, width)\n scipy_2D = gaussian_filter(delta_pulse_2D, width)\n\n 
assert_almost_equal(astropy_1D, scipy_1D, decimal=12)\n assert_almost_equal(astropy_2D, scipy_2D, decimal=12)\n\n @pytest.mark.skipif('not HAS_SCIPY')\n @pytest.mark.parametrize(('width'), WIDTHS_ODD)\n def test_scipy_filter_gaussian_laplace(self, width):\n \"\"\"\n Test RickerWavelet kernels against SciPy ndimage gaussian laplace filters.\n \"\"\"\n from scipy.ndimage import gaussian_laplace\n\n ricker_kernel_1D = RickerWavelet1DKernel(width)\n ricker_kernel_2D = RickerWavelet2DKernel(width)\n\n astropy_1D = convolve(delta_pulse_1D, ricker_kernel_1D, boundary='fill', normalize_kernel=False)\n astropy_2D = convolve(delta_pulse_2D, ricker_kernel_2D, boundary='fill', normalize_kernel=False)\n\n with pytest.raises(Exception) as exc:\n astropy_1D = convolve(delta_pulse_1D, ricker_kernel_1D, boundary='fill', normalize_kernel=True)\n assert 'sum is close to zero' in exc.value.args[0]\n\n with pytest.raises(Exception) as exc:\n astropy_2D = convolve(delta_pulse_2D, ricker_kernel_2D, boundary='fill', normalize_kernel=True)\n assert 'sum is close to zero' in exc.value.args[0]\n\n # The Laplace of Gaussian filter is an inverted Ricker Wavelet filter.\n scipy_1D = -gaussian_laplace(delta_pulse_1D, width)\n scipy_2D = -gaussian_laplace(delta_pulse_2D, width)\n\n # There is a slight deviation in the normalization. They differ by a\n # factor of ~1.0000284132604045. 
The reason is not known.\n assert_almost_equal(astropy_1D, scipy_1D, decimal=5)\n assert_almost_equal(astropy_2D, scipy_2D, decimal=5)\n\n @pytest.mark.parametrize(('kernel_type', 'width'), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD)))\n def test_delta_data(self, kernel_type, width):\n \"\"\"\n Test smoothing of an image with a single positive pixel\n \"\"\"\n if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:\n pytest.skip(\"Omitting AiryDisk2DKernel, which requires SciPy\")\n if not kernel_type == Ring2DKernel:\n kernel = kernel_type(width)\n else:\n kernel = kernel_type(width, width * 0.2)\n\n if kernel.dimension == 1:\n c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)\n c2 = convolve(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)\n assert_almost_equal(c1, c2, decimal=12)\n else:\n c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)\n c2 = convolve(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)\n assert_almost_equal(c1, c2, decimal=12)\n\n @pytest.mark.parametrize(('kernel_type', 'width'), list(itertools.product(KERNEL_TYPES, WIDTHS_ODD)))\n def test_random_data(self, kernel_type, width):\n \"\"\"\n Test smoothing of an image made of random noise\n \"\"\"\n if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:\n pytest.skip(\"Omitting AiryDisk2DKernel, which requires SciPy\")\n if not kernel_type == Ring2DKernel:\n kernel = kernel_type(width)\n else:\n kernel = kernel_type(width, width * 0.2)\n\n if kernel.dimension == 1:\n c1 = convolve_fft(random_data_1D, kernel, boundary='fill', normalize_kernel=False)\n c2 = convolve(random_data_1D, kernel, boundary='fill', normalize_kernel=False)\n assert_almost_equal(c1, c2, decimal=12)\n else:\n c1 = convolve_fft(random_data_2D, kernel, boundary='fill', normalize_kernel=False)\n c2 = convolve(random_data_2D, kernel, boundary='fill', normalize_kernel=False)\n assert_almost_equal(c1, c2, decimal=12)\n\n 
@pytest.mark.parametrize(('width'), WIDTHS_ODD)\n def test_uniform_smallkernel(self, width):\n \"\"\"\n Test smoothing of an image with a single positive pixel\n\n Instead of using kernel class, uses a simple, small kernel\n \"\"\"\n kernel = np.ones([width, width])\n\n c2 = convolve_fft(delta_pulse_2D, kernel, boundary='fill')\n c1 = convolve(delta_pulse_2D, kernel, boundary='fill')\n assert_almost_equal(c1, c2, decimal=12)\n\n @pytest.mark.parametrize(('width'), WIDTHS_ODD)\n def test_smallkernel_vs_Box2DKernel(self, width):\n \"\"\"\n Test smoothing of an image with a single positive pixel\n \"\"\"\n kernel1 = np.ones([width, width]) / width ** 2\n kernel2 = Box2DKernel(width)\n\n c2 = convolve_fft(delta_pulse_2D, kernel2, boundary='fill')\n c1 = convolve_fft(delta_pulse_2D, kernel1, boundary='fill')\n\n assert_almost_equal(c1, c2, decimal=12)\n\n def test_convolve_1D_kernels(self):\n \"\"\"\n Check if convolving two kernels with each other works correctly.\n \"\"\"\n gauss_1 = Gaussian1DKernel(3)\n gauss_2 = Gaussian1DKernel(4)\n test_gauss_3 = Gaussian1DKernel(5)\n\n with pytest.warns(AstropyUserWarning, match=r'Both array and kernel '\n r'are Kernel instances'):\n gauss_3 = convolve(gauss_1, gauss_2)\n\n assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01)\n\n def test_convolve_2D_kernels(self):\n \"\"\"\n Check if convolving two kernels with each other works correctly.\n \"\"\"\n gauss_1 = Gaussian2DKernel(3)\n gauss_2 = Gaussian2DKernel(4)\n test_gauss_3 = Gaussian2DKernel(5)\n\n with pytest.warns(AstropyUserWarning, match=r'Both array and kernel '\n r'are Kernel instances'):\n gauss_3 = convolve(gauss_1, gauss_2)\n\n assert np.all(np.abs((gauss_3 - test_gauss_3).array) < 0.01)\n\n @pytest.mark.parametrize(('number'), NUMS)\n def test_multiply_scalar(self, number):\n \"\"\"\n Check if multiplying a kernel with a scalar works correctly.\n \"\"\"\n gauss = Gaussian1DKernel(3)\n gauss_new = number * gauss\n assert_almost_equal(gauss_new.array, 
gauss.array * number, decimal=12)\n\n @pytest.mark.parametrize(('number'), NUMS)\n def test_multiply_scalar_type(self, number):\n \"\"\"\n Check if multiplying a kernel with a scalar works correctly.\n \"\"\"\n gauss = Gaussian1DKernel(3)\n gauss_new = number * gauss\n assert type(gauss_new) is Gaussian1DKernel\n\n @pytest.mark.parametrize(('number'), NUMS)\n def test_rmultiply_scalar_type(self, number):\n \"\"\"\n Check if multiplying a kernel with a scalar works correctly.\n \"\"\"\n gauss = Gaussian1DKernel(3)\n gauss_new = gauss * number\n assert type(gauss_new) is Gaussian1DKernel\n\n def test_multiply_kernel1d(self):\n \"\"\"Test that multiplying two 1D kernels raises an exception.\"\"\"\n gauss = Gaussian1DKernel(3)\n with pytest.raises(Exception):\n gauss * gauss\n\n def test_multiply_kernel2d(self):\n \"\"\"Test that multiplying two 2D kernels raises an exception.\"\"\"\n gauss = Gaussian2DKernel(3)\n with pytest.raises(Exception):\n gauss * gauss\n\n def test_multiply_kernel1d_kernel2d(self):\n \"\"\"\n Test that multiplying a 1D kernel with a 2D kernel raises an\n exception.\n \"\"\"\n with pytest.raises(Exception):\n Gaussian1DKernel(3) * Gaussian2DKernel(3)\n\n def test_add_kernel_scalar(self):\n \"\"\"Test that adding a scalar to a kernel raises an exception.\"\"\"\n with pytest.raises(Exception):\n Gaussian1DKernel(3) + 1\n\n def test_model_1D_kernel(self):\n \"\"\"\n Check Model1DKernel against Gaussian1Dkernel\n \"\"\"\n stddev = 5.\n gauss = Gaussian1D(1. / np.sqrt(2 * np.pi * stddev**2), 0, stddev)\n model_gauss_kernel = Model1DKernel(gauss, x_size=21)\n model_gauss_kernel.normalize()\n gauss_kernel = Gaussian1DKernel(stddev, x_size=21)\n assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array,\n decimal=12)\n\n def test_model_2D_kernel(self):\n \"\"\"\n Check Model2DKernel against Gaussian2Dkernel\n \"\"\"\n stddev = 5.\n gauss = Gaussian2D(1. 
/ (2 * np.pi * stddev**2), 0, 0, stddev, stddev)\n model_gauss_kernel = Model2DKernel(gauss, x_size=21)\n model_gauss_kernel.normalize()\n gauss_kernel = Gaussian2DKernel(stddev, x_size=21)\n assert_almost_equal(model_gauss_kernel.array, gauss_kernel.array,\n decimal=12)\n\n def test_custom_1D_kernel(self):\n \"\"\"\n Check CustomKernel against Box1DKernel.\n \"\"\"\n # Define one dimensional array:\n array = np.ones(5)\n custom = CustomKernel(array)\n custom.normalize()\n box = Box1DKernel(5)\n\n c2 = convolve(delta_pulse_1D, custom, boundary='fill')\n c1 = convolve(delta_pulse_1D, box, boundary='fill')\n assert_almost_equal(c1, c2, decimal=12)\n\n def test_custom_2D_kernel(self):\n \"\"\"\n Check CustomKernel against Box2DKernel.\n \"\"\"\n # Define one dimensional array:\n array = np.ones((5, 5))\n custom = CustomKernel(array)\n custom.normalize()\n box = Box2DKernel(5)\n\n c2 = convolve(delta_pulse_2D, custom, boundary='fill')\n c1 = convolve(delta_pulse_2D, box, boundary='fill')\n assert_almost_equal(c1, c2, decimal=12)\n\n def test_custom_1D_kernel_list(self):\n \"\"\"\n Check if CustomKernel works with lists.\n \"\"\"\n custom = CustomKernel([1, 1, 1, 1, 1])\n assert custom.is_bool is True\n\n def test_custom_2D_kernel_list(self):\n \"\"\"\n Check if CustomKernel works with lists.\n \"\"\"\n custom = CustomKernel([[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]])\n assert custom.is_bool is True\n\n def test_custom_1D_kernel_zerosum(self):\n \"\"\"\n Check if CustomKernel works when the input array/list\n sums to zero.\n \"\"\"\n array = [-2, -1, 0, 1, 2]\n\n custom = CustomKernel(array)\n\n with pytest.warns(AstropyUserWarning, match=r'kernel cannot be '\n r'normalized because it sums to zero'):\n custom.normalize()\n\n assert custom.truncation == 1.\n assert custom._kernel_sum == 0.\n\n def test_custom_2D_kernel_zerosum(self):\n \"\"\"\n Check if CustomKernel works when the input array/list\n sums to zero.\n \"\"\"\n array = [[0, -1, 0], [-1, 4, -1], [0, -1, 0]]\n\n 
custom = CustomKernel(array)\n\n with pytest.warns(AstropyUserWarning, match=r'kernel cannot be '\n r'normalized because it sums to zero'):\n custom.normalize()\n\n assert custom.truncation == 1.\n assert custom._kernel_sum == 0.\n\n def test_custom_kernel_odd_error(self):\n \"\"\"\n Check if CustomKernel raises if the array size is odd.\n \"\"\"\n with pytest.raises(KernelSizeError):\n CustomKernel([1, 1, 1, 1])\n\n def test_add_1D_kernels(self):\n \"\"\"\n Check if adding of two 1D kernels works.\n \"\"\"\n box_1 = Box1DKernel(5)\n box_2 = Box1DKernel(3)\n box_3 = Box1DKernel(1)\n box_sum_1 = box_1 + box_2 + box_3\n box_sum_2 = box_2 + box_3 + box_1\n box_sum_3 = box_3 + box_1 + box_2\n ref = [1/5., 1/5. + 1/3., 1 + 1/3. + 1/5., 1/5. + 1/3., 1/5.]\n assert_almost_equal(box_sum_1.array, ref, decimal=12)\n assert_almost_equal(box_sum_2.array, ref, decimal=12)\n assert_almost_equal(box_sum_3.array, ref, decimal=12)\n\n # Assert that the kernels haven't changed\n assert_almost_equal(box_1.array, [0.2, 0.2, 0.2, 0.2, 0.2], decimal=12)\n assert_almost_equal(box_2.array, [1/3., 1/3., 1/3.], decimal=12)\n assert_almost_equal(box_3.array, [1], decimal=12)\n\n def test_add_2D_kernels(self):\n \"\"\"\n Check if adding of two 1D kernels works.\n \"\"\"\n box_1 = Box2DKernel(3)\n box_2 = Box2DKernel(1)\n box_sum_1 = box_1 + box_2\n box_sum_2 = box_2 + box_1\n ref = [[1 / 9., 1 / 9., 1 / 9.],\n [1 / 9., 1 + 1 / 9., 1 / 9.],\n [1 / 9., 1 / 9., 1 / 9.]]\n ref_1 = [[1 / 9., 1 / 9., 1 / 9.],\n [1 / 9., 1 / 9., 1 / 9.],\n [1 / 9., 1 / 9., 1 / 9.]]\n assert_almost_equal(box_2.array, [[1]], decimal=12)\n assert_almost_equal(box_1.array, ref_1, decimal=12)\n assert_almost_equal(box_sum_1.array, ref, decimal=12)\n assert_almost_equal(box_sum_2.array, ref, decimal=12)\n\n def test_Gaussian1DKernel_even_size(self):\n \"\"\"\n Check if even size for GaussianKernel works.\n \"\"\"\n gauss = Gaussian1DKernel(3, x_size=10)\n assert gauss.array.size == 10\n\n def 
test_Gaussian2DKernel_even_size(self):\n \"\"\"\n Check if even size for GaussianKernel works.\n \"\"\"\n gauss = Gaussian2DKernel(3, x_size=10, y_size=10)\n assert gauss.array.shape == (10, 10)\n\n # https://github.com/astropy/astropy/issues/3605\n def test_Gaussian2DKernel_rotated(self):\n gauss = Gaussian2DKernel(\n x_stddev=3, y_stddev=1.5, theta=0.7853981633974483,\n x_size=5, y_size=5) # rotated 45 deg ccw\n ans = [[0.04087193, 0.04442386, 0.03657381, 0.02280797, 0.01077372],\n [0.04442386, 0.05704137, 0.05547869, 0.04087193, 0.02280797],\n [0.03657381, 0.05547869, 0.06374482, 0.05547869, 0.03657381],\n [0.02280797, 0.04087193, 0.05547869, 0.05704137, 0.04442386],\n [0.01077372, 0.02280797, 0.03657381, 0.04442386, 0.04087193]]\n assert_allclose(gauss, ans, rtol=0.001) # Rough comparison at 0.1 %\n\n def test_normalize_peak(self):\n \"\"\"\n Check if normalize works with peak mode.\n \"\"\"\n custom = CustomKernel([1, 2, 3, 2, 1])\n custom.normalize(mode='peak')\n assert custom.array.max() == 1\n\n def test_check_kernel_attributes(self):\n \"\"\"\n Check if kernel attributes are correct.\n \"\"\"\n box = Box2DKernel(5)\n\n # Check truncation\n assert box.truncation == 0\n\n # Check model\n assert isinstance(box.model, Box2D)\n\n # Check center\n assert box.center == [2, 2]\n\n # Check normalization\n box.normalize()\n assert_almost_equal(box._kernel_sum, 1., decimal=12)\n\n # Check separability\n assert box.separable\n\n @pytest.mark.parametrize(('kernel_type', 'mode'), list(itertools.product(KERNEL_TYPES, MODES)))\n def test_discretize_modes(self, kernel_type, mode):\n \"\"\"\n Check if the different modes result in kernels that work with convolve.\n Use only small kernel width, to make the test pass quickly.\n \"\"\"\n if kernel_type == AiryDisk2DKernel and not HAS_SCIPY:\n pytest.skip(\"Omitting AiryDisk2DKernel, which requires SciPy\")\n if not kernel_type == Ring2DKernel:\n kernel = kernel_type(3)\n else:\n kernel = kernel_type(3, 3 * 0.2)\n\n if 
kernel.dimension == 1:\n c1 = convolve_fft(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)\n c2 = convolve(delta_pulse_1D, kernel, boundary='fill', normalize_kernel=False)\n assert_almost_equal(c1, c2, decimal=12)\n else:\n c1 = convolve_fft(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)\n c2 = convolve(delta_pulse_2D, kernel, boundary='fill', normalize_kernel=False)\n assert_almost_equal(c1, c2, decimal=12)\n\n @pytest.mark.parametrize(('width'), WIDTHS_EVEN)\n def test_box_kernels_even_size(self, width):\n \"\"\"\n Check if BoxKernel work properly with even sizes.\n \"\"\"\n kernel_1D = Box1DKernel(width)\n assert kernel_1D.shape[0] % 2 != 0\n assert kernel_1D.array.sum() == 1.\n\n kernel_2D = Box2DKernel(width)\n assert np.all([_ % 2 != 0 for _ in kernel_2D.shape])\n assert kernel_2D.array.sum() == 1.\n\n def test_kernel_normalization(self):\n \"\"\"\n Test that repeated normalizations do not change the kernel [#3747].\n \"\"\"\n\n kernel = CustomKernel(np.ones(5))\n kernel.normalize()\n data = np.copy(kernel.array)\n\n kernel.normalize()\n assert_allclose(data, kernel.array)\n\n kernel.normalize()\n assert_allclose(data, kernel.array)\n\n def test_kernel_normalization_mode(self):\n \"\"\"\n Test that an error is raised if mode is invalid.\n \"\"\"\n with pytest.raises(ValueError):\n kernel = CustomKernel(np.ones(3))\n kernel.normalize(mode='invalid')\n\n def test_kernel1d_int_size(self):\n \"\"\"\n Test that an error is raised if ``Kernel1D`` ``x_size`` is not\n an integer.\n \"\"\"\n with pytest.raises(TypeError):\n Gaussian1DKernel(3, x_size=1.2)\n\n def test_kernel2d_int_xsize(self):\n \"\"\"\n Test that an error is raised if ``Kernel2D`` ``x_size`` is not\n an integer.\n \"\"\"\n with pytest.raises(TypeError):\n Gaussian2DKernel(3, x_size=1.2)\n\n def test_kernel2d_int_ysize(self):\n \"\"\"\n Test that an error is raised if ``Kernel2D`` ``y_size`` is not\n an integer.\n \"\"\"\n with pytest.raises(TypeError):\n 
Gaussian2DKernel(3, x_size=5, y_size=1.2)\n\n def test_kernel1d_initialization(self):\n \"\"\"\n Test that an error is raised if an array or model is not\n specified for ``Kernel1D``.\n \"\"\"\n with pytest.raises(TypeError):\n Kernel1D()\n\n def test_kernel2d_initialization(self):\n \"\"\"\n Test that an error is raised if an array or model is not\n specified for ``Kernel2D``.\n \"\"\"\n with pytest.raises(TypeError):\n Kernel2D()\n\n def test_array_keyword_not_allowed(self):\n \"\"\"\n Regression test for issue #10439\n \"\"\"\n x = np.ones([10, 10])\n with pytest.raises(TypeError, match=r\".* allowed .*\"):\n AiryDisk2DKernel(2, array=x)\n Box1DKernel(2, array=x)\n Box2DKernel(2, array=x)\n Gaussian1DKernel(2, array=x)\n Gaussian2DKernel(2, array=x)\n RickerWavelet1DKernel(2, array=x)\n RickerWavelet2DKernel(2, array=x)\n Model1DKernel(Gaussian1D(1, 0, 2), array=x)\n Model2DKernel(Gaussian2D(1, 0, 0, 2, 2), array=x)\n Ring2DKernel(9, 8, array=x)\n Tophat2DKernel(2, array=x)\n Trapezoid1DKernel(2, array=x)\n Trapezoid1DKernel(2, array=x)\n"}}},{"rowIdx":1359,"cells":{"hash":{"kind":"string","value":"009485797fd80b8658d732d64a22be481fefb067b22a67cab6b781bf863d1b93"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport io\nimport os\nimport sys\nimport subprocess\n\nimport pytest\n\nfrom astropy.config import (configuration, set_temp_config, paths,\n create_config_file)\nfrom astropy.utils.data import get_pkg_data_filename\nfrom astropy.utils.exceptions import AstropyDeprecationWarning\n\n\nOLD_CONFIG = {}\n\n\ndef setup_module():\n OLD_CONFIG.clear()\n OLD_CONFIG.update(configuration._cfgobjs)\n\n\ndef teardown_module():\n configuration._cfgobjs.clear()\n configuration._cfgobjs.update(OLD_CONFIG)\n\n\ndef test_paths():\n assert 'astropy' in paths.get_config_dir()\n assert 'astropy' in paths.get_cache_dir()\n\n assert 'testpkg' in paths.get_config_dir(rootname='testpkg')\n assert 'testpkg' in 
paths.get_cache_dir(rootname='testpkg')\n\n\ndef test_set_temp_config(tmpdir, monkeypatch):\n # Check that we start in an understood state.\n assert configuration._cfgobjs == OLD_CONFIG\n # Temporarily remove any temporary overrides of the configuration dir.\n monkeypatch.setattr(paths.set_temp_config, '_temp_path', None)\n\n orig_config_dir = paths.get_config_dir(rootname='astropy')\n temp_config_dir = str(tmpdir.mkdir('config'))\n temp_astropy_config = os.path.join(temp_config_dir, 'astropy')\n\n # Test decorator mode\n @paths.set_temp_config(temp_config_dir)\n def test_func():\n assert paths.get_config_dir(rootname='astropy') == temp_astropy_config\n\n # Test temporary restoration of original default\n with paths.set_temp_config() as d:\n assert d == orig_config_dir == paths.get_config_dir(rootname='astropy')\n\n test_func()\n\n # Test context manager mode (with cleanup)\n with paths.set_temp_config(temp_config_dir, delete=True):\n assert paths.get_config_dir(rootname='astropy') == temp_astropy_config\n\n assert not os.path.exists(temp_config_dir)\n # Check that we have returned to our old configuration.\n assert configuration._cfgobjs == OLD_CONFIG\n\n\ndef test_set_temp_cache(tmpdir, monkeypatch):\n monkeypatch.setattr(paths.set_temp_cache, '_temp_path', None)\n\n orig_cache_dir = paths.get_cache_dir(rootname='astropy')\n temp_cache_dir = str(tmpdir.mkdir('cache'))\n temp_astropy_cache = os.path.join(temp_cache_dir, 'astropy')\n\n # Test decorator mode\n @paths.set_temp_cache(temp_cache_dir)\n def test_func():\n assert paths.get_cache_dir(rootname='astropy') == temp_astropy_cache\n\n # Test temporary restoration of original default\n with paths.set_temp_cache() as d:\n assert d == orig_cache_dir == paths.get_cache_dir(rootname='astropy')\n\n test_func()\n\n # Test context manager mode (with cleanup)\n with paths.set_temp_cache(temp_cache_dir, delete=True):\n assert paths.get_cache_dir(rootname='astropy') == temp_astropy_cache\n\n assert not 
os.path.exists(temp_cache_dir)\n\n\ndef test_set_temp_cache_resets_on_exception(tmpdir):\n \"\"\"Test for regression of bug #9704\"\"\"\n t = paths.get_cache_dir()\n a = tmpdir / 'a'\n with open(a, 'wt') as f:\n f.write(\"not a good cache\\n\")\n with pytest.raises(OSError):\n with paths.set_temp_cache(a):\n pass\n assert t == paths.get_cache_dir()\n\n\ndef test_config_file():\n from astropy.config.configuration import get_config, reload_config\n\n apycfg = get_config('astropy')\n assert apycfg.filename.endswith('astropy.cfg')\n\n cfgsec = get_config('astropy.config')\n assert cfgsec.depth == 1\n assert cfgsec.name == 'config'\n assert cfgsec.parent.filename.endswith('astropy.cfg')\n\n # try with a different package name, still inside astropy config dir:\n testcfg = get_config('testpkg', rootname='astropy')\n parts = os.path.normpath(testcfg.filename).split(os.sep)\n assert '.astropy' in parts or 'astropy' in parts\n assert parts[-1] == 'testpkg.cfg'\n configuration._cfgobjs['testpkg'] = None # HACK\n\n # try with a different package name, no specified root name (should\n # default to astropy):\n testcfg = get_config('testpkg')\n parts = os.path.normpath(testcfg.filename).split(os.sep)\n assert '.astropy' in parts or 'astropy' in parts\n assert parts[-1] == 'testpkg.cfg'\n configuration._cfgobjs['testpkg'] = None # HACK\n\n # try with a different package name, specified root name:\n testcfg = get_config('testpkg', rootname='testpkg')\n parts = os.path.normpath(testcfg.filename).split(os.sep)\n assert '.testpkg' in parts or 'testpkg' in parts\n assert parts[-1] == 'testpkg.cfg'\n configuration._cfgobjs['testpkg'] = None # HACK\n\n # try with a subpackage with specified root name:\n testcfg_sec = get_config('testpkg.somemodule', rootname='testpkg')\n parts = os.path.normpath(testcfg_sec.parent.filename).split(os.sep)\n assert '.testpkg' in parts or 'testpkg' in parts\n assert parts[-1] == 'testpkg.cfg'\n configuration._cfgobjs['testpkg'] = None # HACK\n\n 
reload_config('astropy')\n\n\ndef check_config(conf):\n # test that the output contains some lines that we expect\n assert '# unicode_output = False' in conf\n assert '[io.fits]' in conf\n assert '[table]' in conf\n assert '# replace_warnings = ,' in conf\n assert '[table.jsviewer]' in conf\n assert '# css_urls = https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css,' in conf\n assert '[visualization.wcsaxes]' in conf\n assert '## Whether to log exceptions before raising them.' in conf\n assert '# log_exceptions = False' in conf\n\n\ndef test_generate_config(tmp_path):\n from astropy.config.configuration import generate_config\n out = io.StringIO()\n generate_config('astropy', out)\n conf = out.getvalue()\n\n outfile = tmp_path / 'astropy.cfg'\n generate_config('astropy', outfile)\n with open(outfile) as fp:\n conf2 = fp.read()\n\n for c in (conf, conf2):\n check_config(c)\n\n\ndef test_generate_config2(tmp_path):\n \"\"\"Test that generate_config works with the default filename.\"\"\"\n\n with set_temp_config(tmp_path):\n from astropy.config.configuration import generate_config\n generate_config('astropy')\n\n assert os.path.exists(tmp_path / 'astropy' / 'astropy.cfg')\n\n with open(tmp_path / 'astropy' / 'astropy.cfg') as fp:\n conf = fp.read()\n\n check_config(conf)\n\n\ndef test_create_config_file(tmp_path, caplog):\n with set_temp_config(tmp_path):\n create_config_file('astropy')\n\n # check that the config file has been created\n assert ('The configuration file has been successfully written'\n in caplog.records[0].message)\n assert os.path.exists(tmp_path / 'astropy' / 'astropy.cfg')\n\n with open(tmp_path / 'astropy' / 'astropy.cfg') as fp:\n conf = fp.read()\n check_config(conf)\n\n caplog.clear()\n\n # now modify the config file\n conf = conf.replace('# unicode_output = False', 'unicode_output = True')\n with open(tmp_path / 'astropy' / 'astropy.cfg', mode='w') as fp:\n fp.write(conf)\n\n with set_temp_config(tmp_path):\n 
create_config_file('astropy')\n\n # check that the config file has not been overwritten since it was modified\n assert ('The configuration file already exists and seems to have been '\n 'customized' in caplog.records[0].message)\n\n caplog.clear()\n\n with set_temp_config(tmp_path):\n create_config_file('astropy', overwrite=True)\n\n # check that the config file has been overwritten\n assert ('The configuration file has been successfully written'\n in caplog.records[0].message)\n\n\ndef test_configitem():\n\n from astropy.config.configuration import ConfigNamespace, ConfigItem, get_config\n\n ci = ConfigItem(34, 'this is a Description')\n\n class Conf(ConfigNamespace):\n tstnm = ci\n\n conf = Conf()\n\n assert ci.module == 'astropy.config.tests.test_configs'\n assert ci() == 34\n assert ci.description == 'this is a Description'\n\n assert conf.tstnm == 34\n\n sec = get_config(ci.module)\n assert sec['tstnm'] == 34\n\n ci.description = 'updated Descr'\n ci.set(32)\n assert ci() == 32\n\n # It's useful to go back to the default to allow other test functions to\n # call this one and still be in the default configuration.\n ci.description = 'this is a Description'\n ci.set(34)\n assert ci() == 34\n\n # Test iterator for one-item namespace\n result = [x for x in conf]\n assert result == ['tstnm']\n result = [x for x in conf.keys()]\n assert result == ['tstnm']\n result = [x for x in conf.values()]\n assert result == [ci]\n result = [x for x in conf.items()]\n assert result == [('tstnm', ci)]\n\n\ndef test_configitem_types():\n\n from astropy.config.configuration import ConfigNamespace, ConfigItem\n\n ci1 = ConfigItem(34)\n ci2 = ConfigItem(34.3)\n ci3 = ConfigItem(True)\n ci4 = ConfigItem('astring')\n\n class Conf(ConfigNamespace):\n tstnm1 = ci1\n tstnm2 = ci2\n tstnm3 = ci3\n tstnm4 = ci4\n\n conf = Conf()\n\n assert isinstance(conf.tstnm1, int)\n assert isinstance(conf.tstnm2, float)\n assert isinstance(conf.tstnm3, bool)\n assert isinstance(conf.tstnm4, str)\n\n 
with pytest.raises(TypeError):\n conf.tstnm1 = 34.3\n conf.tstnm2 = 12 # this would should succeed as up-casting\n with pytest.raises(TypeError):\n conf.tstnm3 = 'fasd'\n with pytest.raises(TypeError):\n conf.tstnm4 = 546.245\n\n # Test iterator for multi-item namespace. Assume ordered by insertion order.\n item_names = [x for x in conf]\n assert item_names == ['tstnm1', 'tstnm2', 'tstnm3', 'tstnm4']\n result = [x for x in conf.keys()]\n assert result == item_names\n result = [x for x in conf.values()]\n assert result == [ci1, ci2, ci3, ci4]\n result = [x for x in conf.items()]\n assert result == [('tstnm1', ci1), ('tstnm2', ci2), ('tstnm3', ci3), ('tstnm4', ci4)]\n\n\ndef test_configitem_options(tmpdir):\n\n from astropy.config.configuration import ConfigNamespace, ConfigItem, get_config\n\n cio = ConfigItem(['op1', 'op2', 'op3'])\n\n class Conf(ConfigNamespace):\n tstnmo = cio\n\n conf = Conf() # noqa\n\n sec = get_config(cio.module)\n\n assert isinstance(cio(), str)\n assert cio() == 'op1'\n assert sec['tstnmo'] == 'op1'\n\n cio.set('op2')\n with pytest.raises(TypeError):\n cio.set('op5')\n assert sec['tstnmo'] == 'op2'\n\n # now try saving\n apycfg = sec\n while apycfg.parent is not apycfg:\n apycfg = apycfg.parent\n f = tmpdir.join('astropy.cfg')\n with open(f.strpath, 'wb') as fd:\n apycfg.write(fd)\n with open(f.strpath, encoding='utf-8') as fd:\n lns = [x.strip() for x in f.readlines()]\n\n assert 'tstnmo = op2' in lns\n\n\ndef test_config_noastropy_fallback(monkeypatch):\n \"\"\"\n Tests to make sure configuration items fall back to their defaults when\n there's a problem accessing the astropy directory\n \"\"\"\n\n # make sure the config directory is not searched\n monkeypatch.setenv('XDG_CONFIG_HOME', 'foo')\n monkeypatch.delenv('XDG_CONFIG_HOME')\n monkeypatch.setattr(paths.set_temp_config, '_temp_path', None)\n\n # make sure the _find_or_create_root_dir function fails as though the\n # astropy dir could not be accessed\n def osraiser(dirnm, linkto, 
pkgname=None):\n raise OSError\n monkeypatch.setattr(paths, '_find_or_create_root_dir', osraiser)\n\n # also have to make sure the stored configuration objects are cleared\n monkeypatch.setattr(configuration, '_cfgobjs', {})\n\n with pytest.raises(OSError):\n # make sure the config dir search fails\n paths.get_config_dir(rootname='astropy')\n\n # now run the basic tests, and make sure the warning about no astropy\n # is present\n test_configitem()\n\n\ndef test_configitem_setters():\n\n from astropy.config.configuration import ConfigNamespace, ConfigItem\n\n class Conf(ConfigNamespace):\n tstnm12 = ConfigItem(42, 'this is another Description')\n\n conf = Conf()\n\n assert conf.tstnm12 == 42\n with conf.set_temp('tstnm12', 45):\n assert conf.tstnm12 == 45\n assert conf.tstnm12 == 42\n\n conf.tstnm12 = 43\n assert conf.tstnm12 == 43\n\n with conf.set_temp('tstnm12', 46):\n assert conf.tstnm12 == 46\n\n # Make sure it is reset even with Exception\n try:\n with conf.set_temp('tstnm12', 47):\n raise Exception\n except Exception:\n pass\n\n assert conf.tstnm12 == 43\n\n\ndef test_empty_config_file():\n from astropy.config.configuration import is_unedited_config_file\n\n def get_content(fn):\n with open(get_pkg_data_filename(fn), encoding='latin-1') as fd:\n return fd.read()\n\n content = get_content('data/empty.cfg')\n assert is_unedited_config_file(content)\n\n content = get_content('data/not_empty.cfg')\n assert not is_unedited_config_file(content)\n\n\nclass TestAliasRead:\n\n def setup_class(self):\n configuration._override_config_file = get_pkg_data_filename('data/alias.cfg')\n\n def test_alias_read(self):\n from astropy.utils.data import conf\n\n with pytest.warns(\n AstropyDeprecationWarning,\n match=r\"Config parameter 'name_resolve_timeout' in section \"\n r\"\\[coordinates.name_resolve\\].*\") as w:\n conf.reload()\n assert conf.remote_timeout == 42\n\n assert len(w) == 1\n\n def teardown_class(self):\n from astropy.utils.data import conf\n\n 
configuration._override_config_file = None\n conf.reload()\n\n\ndef test_configitem_unicode(tmpdir):\n\n from astropy.config.configuration import ConfigNamespace, ConfigItem, get_config\n\n cio = ConfigItem('ასტრონომიის')\n\n class Conf(ConfigNamespace):\n tstunicode = cio\n\n conf = Conf() # noqa\n\n sec = get_config(cio.module)\n\n assert isinstance(cio(), str)\n assert cio() == 'ასტრონომიის'\n assert sec['tstunicode'] == 'ასტრონომიის'\n\n\ndef test_warning_move_to_top_level():\n # Check that the warning about deprecation config items in the\n # file works. See #2514\n from astropy import conf\n\n configuration._override_config_file = get_pkg_data_filename('data/deprecated.cfg')\n\n try:\n with pytest.warns(AstropyDeprecationWarning) as w:\n conf.reload()\n conf.max_lines\n assert len(w) == 1\n finally:\n configuration._override_config_file = None\n conf.reload()\n\n\ndef test_no_home():\n # \"import astropy\" fails when neither $HOME or $XDG_CONFIG_HOME\n # are set. To test, we unset those environment variables for a\n # subprocess and try to import astropy.\n\n test_path = os.path.dirname(__file__)\n astropy_path = os.path.abspath(\n os.path.join(test_path, '..', '..', '..'))\n\n env = os.environ.copy()\n paths = [astropy_path]\n if env.get('PYTHONPATH'):\n paths.append(env.get('PYTHONPATH'))\n env['PYTHONPATH'] = os.pathsep.join(paths)\n\n for val in ['HOME', 'XDG_CONFIG_HOME']:\n if val in env:\n del env[val]\n\n retcode = subprocess.check_call(\n [sys.executable, '-c', 'import astropy'],\n env=env)\n\n assert retcode == 0\n"}}},{"rowIdx":1360,"cells":{"hash":{"kind":"string","value":"2489af990e66d1af3a2f711ed276b6d7f599080f270bfa9141d9908f056aecd3"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nModule to test fitting routines\n\"\"\"\n# pylint: disable=invalid-name\nimport os.path\nimport unittest.mock as mk\nfrom importlib.metadata import EntryPoint\nfrom itertools import combinations\nfrom 
unittest import mock\n\nimport numpy as np\nimport pytest\nfrom numpy import linalg\nfrom numpy.testing import assert_allclose, assert_almost_equal, assert_equal\n\nfrom astropy.modeling import models\nfrom astropy.modeling.core import Fittable2DModel, Parameter\nfrom astropy.modeling.fitting import (\n DogBoxLSQFitter, Fitter, FittingWithOutlierRemoval, JointFitter, LevMarLSQFitter,\n LinearLSQFitter, LMLSQFitter, NonFiniteValueError, SimplexLSQFitter, SLSQPLSQFitter,\n TRFLSQFitter, _NLLSQFitter, populate_entry_points)\nfrom astropy.modeling.optimizers import Optimization\nfrom astropy.stats import sigma_clip\nfrom astropy.utils import NumpyRNGContext\nfrom astropy.utils.compat.optional_deps import HAS_SCIPY\nfrom astropy.utils.data import get_pkg_data_filename\nfrom astropy.utils.exceptions import AstropyUserWarning\n\nfrom . import irafutil\n\nif HAS_SCIPY:\n from scipy import optimize\n\n\nfitters = [SimplexLSQFitter, SLSQPLSQFitter]\nnon_linear_fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]\n\n_RANDOM_SEED = 0x1337\n\n\nclass TestPolynomial2D:\n \"\"\"Tests for 2D polynomial fitting.\"\"\"\n\n def setup_class(self):\n self.model = models.Polynomial2D(2)\n self.y, self.x = np.mgrid[:5, :5]\n\n def poly2(x, y):\n return 1 + 2 * x + 3 * x ** 2 + 4 * y + 5 * y ** 2 + 6 * x * y\n self.z = poly2(self.x, self.y)\n\n def test_poly2D_fitting(self):\n fitter = LinearLSQFitter()\n v = self.model.fit_deriv(x=self.x, y=self.y)\n p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0]\n new_model = fitter(self.model, self.x, self.y, self.z)\n assert_allclose(new_model.parameters, p)\n\n def test_eval(self):\n fitter = LinearLSQFitter()\n new_model = fitter(self.model, self.x, self.y, self.z)\n assert_allclose(new_model(self.x, self.y), self.z)\n\n @pytest.mark.skipif('not HAS_SCIPY')\n @pytest.mark.parametrize('fitter', non_linear_fitters)\n def test_nonlinear_fitting(self, fitter):\n fitter = fitter()\n\n self.model.parameters = [.6, 1.8, 2.9, 3.7, 
4.9, 6.7]\n with pytest.warns(AstropyUserWarning,\n match=r'Model is linear in parameters'):\n new_model = fitter(self.model, self.x, self.y, self.z)\n assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])\n\n @pytest.mark.skipif('not HAS_SCIPY')\n def test_compare_nonlinear_fitting(self):\n self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]\n fit_models = []\n for fitter in non_linear_fitters:\n fitter = fitter()\n\n with pytest.warns(AstropyUserWarning,\n match=r'Model is linear in parameters'):\n fit_models.append(fitter(self.model, self.x, self.y, self.z))\n\n for pair in combinations(fit_models, 2):\n assert_allclose(pair[0].parameters, pair[1].parameters)\n\n\nclass TestICheb2D:\n \"\"\"\n Tests 2D Chebyshev polynomial fitting\n\n Create a 2D polynomial (z) using Polynomial2DModel and default coefficients\n Fit z using a ICheb2D model\n Evaluate the ICheb2D polynomial and compare with the initial z\n \"\"\"\n\n def setup_class(self):\n self.pmodel = models.Polynomial2D(2)\n self.y, self.x = np.mgrid[:5, :5]\n self.z = self.pmodel(self.x, self.y)\n self.cheb2 = models.Chebyshev2D(2, 2)\n self.fitter = LinearLSQFitter()\n\n def test_default_params(self):\n self.cheb2.parameters = np.arange(9)\n p = np.array([1344., 1772., 400., 1860., 2448., 552., 432., 568.,\n 128.])\n z = self.cheb2(self.x, self.y)\n model = self.fitter(self.cheb2, self.x, self.y, z)\n assert_almost_equal(model.parameters, p)\n\n def test_poly2D_cheb2D(self):\n model = self.fitter(self.cheb2, self.x, self.y, self.z)\n z1 = model(self.x, self.y)\n assert_almost_equal(self.z, z1)\n\n @pytest.mark.skipif('not HAS_SCIPY')\n @pytest.mark.parametrize('fitter', non_linear_fitters)\n def test_chebyshev2D_nonlinear_fitting(self, fitter):\n fitter = fitter()\n\n cheb2d = models.Chebyshev2D(2, 2)\n cheb2d.parameters = np.arange(9)\n z = cheb2d(self.x, self.y)\n cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]\n with pytest.warns(AstropyUserWarning,\n match=r'Model is linear in 
parameters'):\n model = fitter(cheb2d, self.x, self.y, z)\n assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],\n atol=10**-9)\n\n @pytest.mark.skipif('not HAS_SCIPY')\n @pytest.mark.parametrize('fitter', non_linear_fitters)\n def test_chebyshev2D_nonlinear_fitting_with_weights(self, fitter):\n fitter = fitter()\n\n cheb2d = models.Chebyshev2D(2, 2)\n cheb2d.parameters = np.arange(9)\n z = cheb2d(self.x, self.y)\n cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]\n weights = np.ones_like(self.y)\n with pytest.warns(AstropyUserWarning,\n match=r'Model is linear in parameters'):\n model = fitter(cheb2d, self.x, self.y, z, weights=weights)\n assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],\n atol=10**-9)\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\nclass TestJointFitter:\n\n \"\"\"\n Tests the joint fitting routine using 2 gaussian models\n \"\"\"\n\n def setup_class(self):\n \"\"\"\n Create 2 gaussian models and some data with noise.\n Create a fitter for the two models keeping the amplitude parameter\n common for the two models.\n \"\"\"\n self.g1 = models.Gaussian1D(10, mean=14.9, stddev=.3)\n self.g2 = models.Gaussian1D(10, mean=13, stddev=.4)\n self.jf = JointFitter([self.g1, self.g2],\n {self.g1: ['amplitude'],\n self.g2: ['amplitude']}, [9.8])\n self.x = np.arange(10, 20, .1)\n y1 = self.g1(self.x)\n y2 = self.g2(self.x)\n\n with NumpyRNGContext(_RANDOM_SEED):\n n = np.random.randn(100)\n\n self.ny1 = y1 + 2 * n\n self.ny2 = y2 + 2 * n\n self.jf(self.x, self.ny1, self.x, self.ny2)\n\n def test_joint_parameter(self):\n \"\"\"\n Tests that the amplitude of the two models is the same\n \"\"\"\n assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])\n assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])\n\n def test_joint_fitter(self):\n \"\"\"\n Tests the fitting routine with similar procedure.\n Compares the fitted parameters.\n \"\"\"\n p1 = [14.9, .3]\n p2 = [13, .4]\n A = 9.8\n p = np.r_[A, p1, p2]\n\n def 
model(A, p, x):\n return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)\n\n def errfunc(p, x1, y1, x2, y2):\n return np.ravel(np.r_[model(p[0], p[1:3], x1) - y1,\n model(p[0], p[3:], x2) - y2])\n\n coeff, _ = optimize.leastsq(errfunc, p,\n args=(self.x, self.ny1, self.x, self.ny2))\n assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))\n\n\nclass TestLinearLSQFitter:\n def test_compound_model_raises_error(self):\n \"\"\"Test that if an user tries to use a compound model, raises an error\"\"\"\n with pytest.raises(ValueError) as excinfo:\n init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)\n init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)\n init_model_comp = init_model1 + init_model2\n x = np.arange(10)\n y = init_model_comp(x, model_set_axis=False)\n fitter = LinearLSQFitter()\n _ = fitter(init_model_comp, x, y)\n assert \"Model must be simple, not compound\" in str(excinfo.value)\n\n def test_chebyshev1D(self):\n \"\"\"Tests fitting a 1D Chebyshev polynomial to some real world data.\"\"\"\n\n test_file = get_pkg_data_filename(os.path.join('data',\n 'idcompspec.fits'))\n with open(test_file) as f:\n lines = f.read()\n reclist = lines.split('begin')\n\n record = irafutil.IdentifyRecord(reclist[1])\n coeffs = record.coeff\n order = int(record.fields['order'])\n\n initial_model = models.Chebyshev1D(order - 1,\n domain=record.get_range())\n fitter = LinearLSQFitter()\n\n fitted_model = fitter(initial_model, record.x, record.z)\n assert_allclose(fitted_model.parameters, np.array(coeffs),\n rtol=10e-2)\n\n def test_linear_fit_model_set(self):\n \"\"\"Tests fitting multiple models simultaneously.\"\"\"\n\n init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)\n x = np.arange(10)\n y_expected = init_model(x, model_set_axis=False)\n assert y_expected.shape == (2, 10)\n\n # Add a bit of random noise\n with NumpyRNGContext(_RANDOM_SEED):\n y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)\n\n fitter = 
LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n assert_allclose(fitted_model(x, model_set_axis=False), y_expected,\n rtol=1e-1)\n\n def test_linear_fit_2d_model_set(self):\n \"\"\"Tests fitted multiple 2-D models simultaneously.\"\"\"\n\n init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)\n x = np.arange(10)\n y = np.arange(10)\n z_expected = init_model(x, y, model_set_axis=False)\n assert z_expected.shape == (2, 10)\n\n # Add a bit of random noise\n with NumpyRNGContext(_RANDOM_SEED):\n z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y, z)\n assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected,\n rtol=1e-1)\n\n def test_linear_fit_fixed_parameter(self):\n \"\"\"\n Tests fitting a polynomial model with a fixed parameter (issue #6135).\n \"\"\"\n init_model = models.Polynomial1D(degree=2, c1=1)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n y = 2 + x + 0.5*x*x\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n assert_allclose(fitted_model.parameters, [2., 1., 0.5], atol=1e-14)\n\n def test_linear_fit_model_set_fixed_parameter(self):\n \"\"\"\n Tests fitting a polynomial model set with a fixed parameter (#6135).\n \"\"\"\n init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n yy = np.array([2 + x + 0.5*x*x, -2*x])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, yy)\n\n assert_allclose(fitted_model.c0, [2., 0.], atol=1e-14)\n assert_allclose(fitted_model.c1, [1., -2.], atol=1e-14)\n assert_allclose(fitted_model.c2, [0.5, 0.], atol=1e-14)\n\n def test_linear_fit_2d_model_set_fixed_parameters(self):\n \"\"\"\n Tests fitting a 2d polynomial model set with fixed parameters (#6135).\n \"\"\"\n init_model = models.Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],\n n_models=2,\n fixed={'c1_0': True, 'c0_1': True})\n\n x, y 
= np.mgrid[0:5, 0:5]\n zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y, zz)\n\n assert_allclose(fitted_model(x, y, model_set_axis=False), zz,\n atol=1e-14)\n\n def test_linear_fit_model_set_masked_values(self):\n \"\"\"\n Tests model set fitting with masked value(s) (#4824, #6819).\n \"\"\"\n # NB. For single models, there is an equivalent doctest.\n\n init_model = models.Polynomial1D(degree=1, n_models=2)\n x = np.arange(10)\n y = np.ma.masked_array([2*x+1, x-2], mask=np.zeros_like([x, x]))\n\n y[0, 7] = 100. # throw off fit coefficients if unmasked\n y.mask[0, 7] = True\n y[1, 1:3] = -100.\n y.mask[1, 1:3] = True\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n\n assert_allclose(fitted_model.c0, [1., -2.], atol=1e-14)\n assert_allclose(fitted_model.c1, [2., 1.], atol=1e-14)\n\n def test_linear_fit_2d_model_set_masked_values(self):\n \"\"\"\n Tests 2D model set fitting with masked value(s) (#4824, #6819).\n \"\"\"\n init_model = models.Polynomial2D(1, n_models=2)\n x, y = np.mgrid[0:5, 0:5]\n z = np.ma.masked_array([2*x+3*y+1, x-0.5*y-2],\n mask=np.zeros_like([x, x]))\n\n z[0, 3, 1] = -1000. # throw off fit coefficients if unmasked\n z.mask[0, 3, 1] = True\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y, z)\n\n assert_allclose(fitted_model.c0_0, [1., -2.], atol=1e-14)\n assert_allclose(fitted_model.c1_0, [2., 1.], atol=1e-14)\n assert_allclose(fitted_model.c0_1, [3., -0.5], atol=1e-14)\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\nclass TestNonLinearFitters:\n \"\"\"Tests non-linear least squares fitting and the SLSQP algorithm.\"\"\"\n\n def setup_class(self):\n self.initial_values = [100, 5, 1]\n\n self.xdata = np.arange(0, 10, 0.1)\n sigma = 4. 
* np.ones_like(self.xdata)\n\n with NumpyRNGContext(_RANDOM_SEED):\n yerror = np.random.normal(0, sigma)\n\n def func(p, x):\n return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)\n\n self.ydata = func(self.initial_values, self.xdata) + yerror\n self.gauss = models.Gaussian1D(100, 5, stddev=1)\n\n @pytest.mark.parametrize('fitter0', non_linear_fitters)\n @pytest.mark.parametrize('fitter1', non_linear_fitters)\n def test_estimated_vs_analytic_deriv(self, fitter0, fitter1):\n \"\"\"\n Runs `LevMarLSQFitter` and `TRFLSQFitter` with estimated and\n analytic derivatives of a `Gaussian1D`.\n \"\"\"\n fitter0 = fitter0()\n model = fitter0(self.gauss, self.xdata, self.ydata)\n g1e = models.Gaussian1D(100, 5.0, stddev=1)\n\n fitter1 = fitter1()\n emodel = fitter1(g1e, self.xdata, self.ydata, estimate_jacobian=True)\n assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))\n\n @pytest.mark.parametrize('fitter0', non_linear_fitters)\n @pytest.mark.parametrize('fitter1', non_linear_fitters)\n def test_estimated_vs_analytic_deriv_with_weights(self, fitter0, fitter1):\n \"\"\"\n Runs `LevMarLSQFitter` and `TRFLSQFitter` with estimated and\n analytic derivatives of a `Gaussian1D`.\n \"\"\"\n\n weights = 1.0 / (self.ydata / 10.)\n\n fitter0 = fitter0()\n model = fitter0(self.gauss, self.xdata, self.ydata, weights=weights)\n g1e = models.Gaussian1D(100, 5.0, stddev=1)\n\n fitter1 = fitter1()\n emodel = fitter1(g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True)\n assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))\n\n @pytest.mark.parametrize('fitter', non_linear_fitters)\n def test_with_optimize(self, fitter):\n \"\"\"\n Tests results from `LevMarLSQFitter` and `TRFLSQFitter` against\n `scipy.optimize.leastsq`.\n \"\"\"\n fitter = fitter()\n\n model = fitter(self.gauss, self.xdata, self.ydata,\n estimate_jacobian=True)\n\n def func(p, x):\n return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)\n\n def errfunc(p, x, y):\n 
return func(p, x) - y\n\n result = optimize.leastsq(errfunc, self.initial_values,\n args=(self.xdata, self.ydata))\n assert_allclose(model.parameters, result[0], rtol=10 ** (-3))\n\n @pytest.mark.parametrize('fitter', non_linear_fitters)\n def test_with_weights(self, fitter):\n \"\"\"\n Tests results from `LevMarLSQFitter` and `TRFLSQFitter` with weights.\n \"\"\"\n fitter = fitter()\n\n # part 1: weights are equal to 1\n model = fitter(self.gauss, self.xdata, self.ydata,\n estimate_jacobian=True)\n withw = fitter(self.gauss, self.xdata, self.ydata,\n estimate_jacobian=True, weights=np.ones_like(self.xdata))\n\n assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))\n\n # part 2: weights are 0 or 1 (effectively, they are a mask)\n weights = np.zeros_like(self.xdata)\n weights[::2] = 1.\n mask = weights >= 1.\n\n model = fitter(self.gauss, self.xdata[mask], self.ydata[mask],\n estimate_jacobian=True)\n withw = fitter(self.gauss, self.xdata, self.ydata,\n estimate_jacobian=True, weights=weights)\n\n assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))\n\n @pytest.mark.filterwarnings(r'ignore:.* Maximum number of iterations reached')\n @pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '\n r'clipping to bounds')\n @pytest.mark.parametrize('fitter_class', fitters)\n @pytest.mark.parametrize('fitter', non_linear_fitters)\n def test_fitter_against_LevMar(self, fitter_class, fitter):\n \"\"\"\n Tests results from non-linear fitters against `LevMarLSQFitter`\n and `TRFLSQFitter`\n \"\"\"\n fitter = fitter()\n\n fitter_cls = fitter_class()\n # This emits a warning from fitter that we need to ignore with\n # pytest.mark.filterwarnings above.\n new_model = fitter_cls(self.gauss, self.xdata, self.ydata)\n model = fitter(self.gauss, self.xdata, self.ydata)\n assert_allclose(model.parameters, new_model.parameters,\n rtol=10 ** (-4))\n\n @pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds 
during a minimize step, '\n r'clipping to bounds')\n @pytest.mark.parametrize('fitter', non_linear_fitters)\n def test_LSQ_SLSQP_with_constraints(self, fitter):\n \"\"\"\n Runs `LevMarLSQFitter`/`TRFLSQFitter` and `SLSQPLSQFitter` on a\n model with constraints.\n \"\"\"\n fitter = fitter()\n\n g1 = models.Gaussian1D(100, 5, stddev=1)\n g1.mean.fixed = True\n fslsqp = SLSQPLSQFitter()\n slsqp_model = fslsqp(g1, self.xdata, self.ydata)\n model = fitter(g1, self.xdata, self.ydata)\n assert_allclose(model.parameters, slsqp_model.parameters,\n rtol=10 ** (-4))\n\n @pytest.mark.parametrize('fitter', non_linear_fitters)\n def test_non_linear_lsq_fitter_with_weights(self, fitter):\n \"\"\"\n Tests that issue #11581 has been solved.\n \"\"\"\n fitter = fitter()\n\n np.random.seed(42)\n norder = 2\n\n fitter2 = LinearLSQFitter()\n\n model = models.Polynomial1D(norder)\n npts = 10000\n c = [2.0, -10.0, 7.0]\n tw = np.random.uniform(0.0, 10.0, npts)\n tx = np.random.uniform(0.0, 10.0, npts)\n ty = c[0] + c[1] * tx + c[2] * (tx ** 2)\n ty += np.random.normal(0.0, 1.5, npts)\n\n with pytest.warns(AstropyUserWarning, match=r'Model is linear in parameters'):\n tf1 = fitter(model, tx, ty, weights=tw)\n tf2 = fitter2(model, tx, ty, weights=tw)\n\n assert_allclose(tf1.parameters, tf2.parameters,\n atol=10 ** (-16))\n assert_allclose(tf1.parameters, c,\n rtol=10 ** (-2), atol=10 ** (-2))\n\n model = models.Gaussian1D()\n if isinstance(fitter, TRFLSQFitter) or isinstance(fitter, LMLSQFitter):\n with pytest.warns(AstropyUserWarning, match=r'The fit may be unsuccessful; *.'):\n fitter(model, tx, ty, weights=tw)\n else:\n fitter(model, tx, ty, weights=tw)\n\n model = models.Polynomial2D(norder)\n nxpts = 100\n nypts = 150\n npts = nxpts * nypts\n c = [1.0, 4.0, 7.0, -8.0, -9.0, -3.0]\n tw = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)\n tx = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)\n ty = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)\n tz = c[0] + 
c[1] * tx + c[2] * (tx ** 2) + c[3] * ty + c[4] * (ty ** 2) + c[5] * tx * ty\n tz += np.random.normal(0.0, 1.5, npts).reshape(nxpts, nypts)\n\n with pytest.warns(AstropyUserWarning, match=r'Model is linear in parameters'):\n tf1 = fitter(model, tx, ty, tz, weights=tw)\n tf2 = fitter2(model, tx, ty, tz, weights=tw)\n\n assert_allclose(tf1.parameters, tf2.parameters,\n atol=10 ** (-16))\n assert_allclose(tf1.parameters, c,\n rtol=10 ** (-2), atol=10 ** (-2))\n\n def test_simplex_lsq_fitter(self):\n \"\"\"A basic test for the `SimplexLSQ` fitter.\"\"\"\n\n class Rosenbrock(Fittable2DModel):\n a = Parameter()\n b = Parameter()\n\n @staticmethod\n def evaluate(x, y, a, b):\n return (a - x) ** 2 + b * (y - x ** 2) ** 2\n\n x = y = np.linspace(-3.0, 3.0, 100)\n with NumpyRNGContext(_RANDOM_SEED):\n z = Rosenbrock.evaluate(x, y, 1.0, 100.0)\n z += np.random.normal(0., 0.1, size=z.shape)\n\n fitter = SimplexLSQFitter()\n r_i = Rosenbrock(1, 100)\n r_f = fitter(r_i, x, y, z)\n\n assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)\n\n @pytest.mark.parametrize('fitter', non_linear_fitters)\n def test_param_cov(self, fitter):\n \"\"\"\n Tests that the 'param_cov' fit_info entry gets the right answer for\n *linear* least squares, where the answer is exact\n \"\"\"\n fitter = fitter()\n\n a = 2\n b = 100\n\n with NumpyRNGContext(_RANDOM_SEED):\n x = np.linspace(0, 1, 100)\n # y scatter is amplitude ~1 to make sure covarience is\n # non-negligible\n y = x*a + b + np.random.randn(len(x))\n\n # first compute the ordinary least squares covariance matrix\n X = np.vstack([x, np.ones(len(x))]).T\n beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y.T)\n s2 = (np.sum((y - np.matmul(X, beta).ravel())**2) /\n (len(y) - len(beta)))\n olscov = np.linalg.inv(np.matmul(X.T, X)) * s2\n\n # now do the non-linear least squares fit\n mod = models.Linear1D(a, b)\n\n with pytest.warns(AstropyUserWarning,\n match=r'Model is linear in parameters'):\n fmod = fitter(mod, x, y)\n\n 
assert_allclose(fmod.parameters, beta.ravel())\n assert_allclose(olscov, fitter.fit_info['param_cov'])\n\n\nclass TestEntryPoint:\n \"\"\"Tests population of fitting with entry point fitters\"\"\"\n\n def successfulimport(self):\n # This should work\n class goodclass(Fitter):\n __name__ = \"GoodClass\"\n return goodclass\n\n def raiseimporterror(self):\n # This should fail as it raises an Import Error\n raise ImportError\n\n def returnbadfunc(self):\n def badfunc():\n # This should import but it should fail type check\n pass\n return badfunc\n\n def returnbadclass(self):\n # This should import But it should fail subclass type check\n class badclass:\n pass\n return badclass\n\n def test_working(self):\n \"\"\"This should work fine\"\"\"\n mock_entry_working = mock.create_autospec(EntryPoint)\n mock_entry_working.name = \"Working\"\n mock_entry_working.load = self.successfulimport\n populate_entry_points([mock_entry_working])\n\n def test_import_error(self):\n \"\"\"This raises an import error on load to test that it is handled correctly\"\"\"\n\n mock_entry_importerror = mock.create_autospec(EntryPoint)\n mock_entry_importerror.name = \"IErr\"\n mock_entry_importerror.load = self.raiseimporterror\n\n with pytest.warns(AstropyUserWarning, match=r\".*ImportError.*\"):\n populate_entry_points([mock_entry_importerror])\n\n def test_bad_func(self):\n \"\"\"This returns a function which fails the type check\"\"\"\n\n mock_entry_badfunc = mock.create_autospec(EntryPoint)\n mock_entry_badfunc.name = \"BadFunc\"\n mock_entry_badfunc.load = self.returnbadfunc\n\n with pytest.warns(AstropyUserWarning, match=r\".*Class.*\"):\n populate_entry_points([mock_entry_badfunc])\n\n def test_bad_class(self):\n \"\"\"This returns a class which doesn't inherient from fitter \"\"\"\n\n mock_entry_badclass = mock.create_autospec(EntryPoint)\n mock_entry_badclass.name = \"BadClass\"\n mock_entry_badclass.load = self.returnbadclass\n\n with pytest.warns(AstropyUserWarning, 
match=r\".*BadClass.*\"):\n populate_entry_points([mock_entry_badclass])\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\nclass Test1DFittingWithOutlierRemoval:\n def setup_class(self):\n self.x = np.linspace(-5., 5., 200)\n self.model_params = (3.0, 1.3, 0.8)\n\n def func(p, x):\n return p[0]*np.exp(-0.5*(x - p[1])**2/p[2]**2)\n\n self.y = func(self.model_params, self.x)\n\n @pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')\n @pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '\n r'clipping to bounds')\n @pytest.mark.parametrize('fitter', non_linear_fitters + fitters)\n def test_with_fitters_and_sigma_clip(self, fitter):\n import scipy.stats as stats\n\n fitter = fitter()\n\n np.random.seed(0)\n c = stats.bernoulli.rvs(0.25, size=self.x.shape)\n y = self.y + (np.random.normal(0., 0.2, self.x.shape) +\n c*np.random.normal(3.0, 5.0, self.x.shape))\n\n g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)\n fit = FittingWithOutlierRemoval(fitter, sigma_clip,\n niter=3, sigma=3.0)\n fitted_model, _ = fit(g_init, self.x, y)\n assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\nclass Test2DFittingWithOutlierRemoval:\n def setup_class(self):\n self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]\n self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)\n\n def Gaussian_2D(p, pos):\n return p[0]*np.exp(-0.5*(pos[0] - p[2])**2 / p[4]**2 -\n 0.5*(pos[1] - p[1])**2 / p[3]**2)\n\n self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))\n\n def initial_guess(self, data, pos):\n y = pos[0]\n x = pos[1]\n\n \"\"\"computes the centroid of the data as the initial guess for the\n center position\"\"\"\n\n wx = x * data\n wy = y * data\n total_intensity = np.sum(data)\n x_mean = np.sum(wx) / total_intensity\n y_mean = np.sum(wy) / total_intensity\n\n x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])\n y_to_pixel = y[0].size / (y[y[0].size - 
1][y[0].size - 1] - y[0][0])\n x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.).astype(int)\n y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.).astype(int)\n\n amplitude = data[y_pos][x_pos]\n\n return amplitude, x_mean, y_mean\n\n @pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')\n @pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '\n r'clipping to bounds')\n @pytest.mark.parametrize('fitter', non_linear_fitters + fitters)\n def test_with_fitters_and_sigma_clip(self, fitter):\n import scipy.stats as stats\n\n fitter = fitter()\n\n np.random.seed(0)\n c = stats.bernoulli.rvs(0.25, size=self.z.shape)\n z = self.z + (np.random.normal(0., 0.2, self.z.shape) +\n c*np.random.normal(self.z, 2.0, self.z.shape))\n\n guess = self.initial_guess(self.z, np.array([self.y, self.x]))\n g2_init = models.Gaussian2D(amplitude=guess[0], x_mean=guess[1],\n y_mean=guess[2], x_stddev=0.75,\n y_stddev=1.25)\n\n fit = FittingWithOutlierRemoval(fitter, sigma_clip,\n niter=3, sigma=3.)\n fitted_model, _ = fit(g2_init, self.x, self.y, z)\n assert_allclose(fitted_model.parameters[0:5], self.model_params,\n atol=1e-1)\n\n\ndef test_1d_set_fitting_with_outlier_removal():\n \"\"\"Test model set fitting with outlier removal (issue #6819)\"\"\"\n\n poly_set = models.Polynomial1D(2, n_models=2)\n\n fitter = FittingWithOutlierRemoval(LinearLSQFitter(),\n sigma_clip, sigma=2.5, niter=3,\n cenfunc=np.ma.mean, stdfunc=np.ma.std)\n\n x = np.arange(10)\n y = np.array([2.5*x - 4, 2*x*x + x + 10])\n y[1, 5] = -1000 # outlier\n\n poly_set, filt_y = fitter(poly_set, x, y)\n\n assert_allclose(poly_set.c0, [-4., 10.], atol=1e-14)\n assert_allclose(poly_set.c1, [2.5, 1.], atol=1e-14)\n assert_allclose(poly_set.c2, [0., 2.], atol=1e-14)\n\n\ndef test_2d_set_axis_2_fitting_with_outlier_removal():\n \"\"\"Test fitting 2D model set (axis 2) with outlier removal (issue #6819)\"\"\"\n\n poly_set = models.Polynomial2D(1, n_models=2, 
model_set_axis=2)\n\n fitter = FittingWithOutlierRemoval(LinearLSQFitter(),\n sigma_clip, sigma=2.5, niter=3,\n cenfunc=np.ma.mean, stdfunc=np.ma.std)\n\n y, x = np.mgrid[0:5, 0:5]\n z = np.rollaxis(np.array([x+y, 1-0.1*x+0.2*y]), 0, 3)\n z[3, 3:5, 0] = 100. # outliers\n\n poly_set, filt_z = fitter(poly_set, x, y, z)\n assert_allclose(poly_set.c0_0, [[[0., 1.]]], atol=1e-14)\n assert_allclose(poly_set.c1_0, [[[1., -0.1]]], atol=1e-14)\n assert_allclose(poly_set.c0_1, [[[1., 0.2]]], atol=1e-14)\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\nclass TestWeightedFittingWithOutlierRemoval:\n \"\"\"Issue #7020 \"\"\"\n\n def setup_class(self):\n # values of x,y not important as we fit y(x,y) = p0 model here\n self.y, self.x = np.mgrid[0:20, 0:20]\n self.z = np.mod(self.x + self.y, 2) * 2 - 1 # -1,1 chessboard\n self.weights = np.mod(self.x + self.y, 2) * 2 + 1 # 1,3 chessboard\n self.z[0, 0] = 1000.0 # outlier\n self.z[0, 1] = 1000.0 # outlier\n self.x1d = self.x.flatten()\n self.z1d = self.z.flatten()\n self.weights1d = self.weights.flatten()\n\n def test_1d_without_weights_without_sigma_clip(self):\n model = models.Polynomial1D(0)\n fitter = LinearLSQFitter()\n fit = fitter(model, self.x1d, self.z1d)\n assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10**(-2))\n\n def test_1d_without_weights_with_sigma_clip(self):\n model = models.Polynomial1D(0)\n fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,\n niter=3, sigma=3.)\n fit, mask = fitter(model, self.x1d, self.z1d)\n assert((~mask).sum() == self.z1d.size - 2)\n assert(mask[0] and mask[1])\n assert_allclose(fit.parameters[0], 0.0, atol=10**(-2)) # with removed outliers mean is 0.0\n\n def test_1d_with_weights_without_sigma_clip(self):\n model = models.Polynomial1D(0)\n fitter = LinearLSQFitter()\n fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)\n assert(fit.parameters[0] > 1.0) # outliers pulled it high\n\n def test_1d_with_weights_with_sigma_clip(self):\n \"\"\"\n smoke test for #7020 
- fails without fitting.py\n patch because weights does not propagate\n \"\"\"\n model = models.Polynomial1D(0)\n fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,\n niter=3, sigma=3.)\n fit, filtered = fitter(model, self.x1d, self.z1d, weights=self.weights1d)\n assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0\n # outliers didn't pull it out of [-1:1] because they had been removed\n assert(fit.parameters[0] < 1.0)\n\n def test_1d_set_with_common_weights_with_sigma_clip(self):\n \"\"\"added for #6819 (1D model set with weights in common)\"\"\"\n model = models.Polynomial1D(0, n_models=2)\n fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,\n niter=3, sigma=3.)\n z1d = np.array([self.z1d, self.z1d])\n\n fit, filtered = fitter(model, self.x1d, z1d, weights=self.weights1d)\n assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)\n\n def test_1d_set_with_weights_with_sigma_clip(self):\n \"\"\"1D model set with separate weights\"\"\"\n model = models.Polynomial1D(0, n_models=2)\n fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,\n niter=3, sigma=3.)\n z1d = np.array([self.z1d, self.z1d])\n weights = np.array([self.weights1d, self.weights1d])\n\n fit, filtered = fitter(model, self.x1d, z1d, weights=weights)\n assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)\n\n def test_2d_without_weights_without_sigma_clip(self):\n model = models.Polynomial2D(0)\n fitter = LinearLSQFitter()\n fit = fitter(model, self.x, self.y, self.z)\n assert_allclose(fit.parameters[0], self.z.mean(), atol=10**(-2))\n\n def test_2d_without_weights_with_sigma_clip(self):\n model = models.Polynomial2D(0)\n fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,\n niter=3, sigma=3.)\n fit, mask = fitter(model, self.x, self.y, self.z)\n assert((~mask).sum() == self.z.size - 2)\n assert(mask[0, 0] and mask[0, 1])\n assert_allclose(fit.parameters[0], 0.0, atol=10**(-2))\n\n @pytest.mark.parametrize('fitter', non_linear_fitters)\n 
def test_2d_with_weights_without_sigma_clip(self, fitter):\n fitter = fitter()\n\n model = models.Polynomial2D(0)\n with pytest.warns(AstropyUserWarning,\n match=r'Model is linear in parameters'):\n fit = fitter(model, self.x, self.y, self.z, weights=self.weights)\n assert(fit.parameters[0] > 1.0) # outliers pulled it high\n\n def test_2d_linear_with_weights_without_sigma_clip(self):\n model = models.Polynomial2D(0)\n fitter = LinearLSQFitter() # LinearLSQFitter doesn't handle weights properly in 2D\n fit = fitter(model, self.x, self.y, self.z, weights=self.weights)\n assert(fit.parameters[0] > 1.0) # outliers pulled it high\n\n @pytest.mark.parametrize('base_fitter', non_linear_fitters)\n def test_2d_with_weights_with_sigma_clip(self, base_fitter):\n \"\"\"smoke test for #7020 - fails without fitting.py patch because\n weights does not propagate\"\"\"\n base_fitter = base_fitter()\n\n model = models.Polynomial2D(0)\n fitter = FittingWithOutlierRemoval(base_fitter, sigma_clip,\n niter=3, sigma=3.)\n with pytest.warns(AstropyUserWarning,\n match=r'Model is linear in parameters'):\n fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)\n assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0\n # outliers didn't pull it out of [-1:1] because they had been removed\n assert(fit.parameters[0] < 1.0)\n\n def test_2d_linear_with_weights_with_sigma_clip(self):\n \"\"\"same as test above with a linear fitter.\"\"\"\n model = models.Polynomial2D(0)\n fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,\n niter=3, sigma=3.)\n fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)\n assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0\n # outliers didn't pull it out of [-1:1] because they had been removed\n assert(fit.parameters[0] < 1.0)\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\n@pytest.mark.parametrize('fitter', non_linear_fitters)\ndef test_fitters_with_weights(fitter):\n \"\"\"Issue #5737 \"\"\"\n fitter = 
fitter()\n\n if isinstance(fitter, _NLLSQFitter):\n pytest.xfail(\"This test is poorly designed and causes issues for \"\n \"scipy.optimize.least_squares based fitters\")\n\n Xin, Yin = np.mgrid[0:21, 0:21]\n\n with NumpyRNGContext(_RANDOM_SEED):\n zsig = np.random.normal(0, 0.01, size=Xin.shape)\n\n # Non-linear model\n g2 = models.Gaussian2D(10, 10, 9, 2, 3)\n z = g2(Xin, Yin)\n gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)\n assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))\n\n # Linear model\n p2 = models.Polynomial2D(3)\n p2.parameters = np.arange(10)/1.2\n z = p2(Xin, Yin)\n with pytest.warns(AstropyUserWarning,\n match=r'Model is linear in parameters'):\n pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)\n assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))\n\n\ndef test_linear_fitter_with_weights():\n \"\"\"Regression test for #7035\"\"\"\n Xin, Yin = np.mgrid[0:21, 0:21]\n fitter = LinearLSQFitter()\n\n with NumpyRNGContext(_RANDOM_SEED):\n zsig = np.random.normal(0, 0.01, size=Xin.shape)\n\n p2 = models.Polynomial2D(3)\n p2.parameters = np.arange(10)/1.2\n z = p2(Xin, Yin)\n pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig**(-2))\n assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))\n\n\ndef test_linear_fitter_with_weights_flat():\n \"\"\"Same as the above #7035 test but with flattened inputs\"\"\"\n Xin, Yin = np.mgrid[0:21, 0:21]\n Xin, Yin = Xin.flatten(), Yin.flatten()\n fitter = LinearLSQFitter()\n\n with NumpyRNGContext(_RANDOM_SEED):\n zsig = np.random.normal(0, 0.01, size=Xin.shape)\n\n p2 = models.Polynomial2D(3)\n p2.parameters = np.arange(10)/1.2\n z = p2(Xin, Yin)\n pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig**(-2))\n assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\n@pytest.mark.filterwarnings('ignore:The fit may be 
unsuccessful')\n@pytest.mark.parametrize('fitter', non_linear_fitters + fitters)\ndef test_fitters_interface(fitter):\n \"\"\"\n Test that ``**kwargs`` work with all optimizers.\n This is a basic smoke test.\n \"\"\"\n fitter = fitter()\n\n model = models.Gaussian1D(10, 4, .3)\n x = np.arange(21)\n y = model(x)\n\n if isinstance(fitter, SimplexLSQFitter):\n kwargs = {'maxiter': 79, 'verblevel': 1, 'acc': 1e-6}\n else:\n kwargs = {'maxiter': 77, 'verblevel': 1, 'epsilon': 1e-2, 'acc': 1e-6}\n\n if isinstance(fitter, LevMarLSQFitter) or isinstance(fitter, _NLLSQFitter):\n kwargs.pop('verblevel')\n\n _ = fitter(model, x, y, **kwargs)\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\n@pytest.mark.parametrize('fitter_class', [SLSQPLSQFitter, SimplexLSQFitter])\ndef test_optimizers(fitter_class):\n fitter = fitter_class()\n\n # Test maxiter\n assert fitter._opt_method.maxiter == 100\n fitter._opt_method.maxiter = 1000\n assert fitter._opt_method.maxiter == 1000\n\n # Test eps\n assert fitter._opt_method.eps == np.sqrt(np.finfo(float).eps)\n fitter._opt_method.eps = 1e-16\n assert fitter._opt_method.eps == 1e-16\n\n # Test acc\n assert fitter._opt_method.acc == 1e-7\n fitter._opt_method.acc = 1e-16\n assert fitter._opt_method.acc == 1e-16\n\n # Test repr\n assert repr(fitter._opt_method) == f\"{fitter._opt_method.__class__.__name__}()\"\n\n fitparams = mk.MagicMock()\n final_func_val = mk.MagicMock()\n numiter = mk.MagicMock()\n funcalls = mk.MagicMock()\n exit_mode = 1\n mess = mk.MagicMock()\n xtol = mk.MagicMock()\n\n if fitter_class == SLSQPLSQFitter:\n return_value = (fitparams, final_func_val, numiter, exit_mode, mess)\n fit_info = {\n 'final_func_val': final_func_val,\n 'numiter': numiter,\n 'exit_mode': exit_mode,\n 'message': mess\n }\n else:\n return_value = (fitparams, final_func_val, numiter, funcalls, exit_mode)\n fit_info = {\n 'final_func_val': final_func_val,\n 'numiter': numiter,\n 'exit_mode': exit_mode,\n 'num_function_calls': funcalls\n }\n\n with 
mk.patch.object(fitter._opt_method.__class__, 'opt_method',\n return_value=return_value):\n with pytest.warns(AstropyUserWarning, match=r\"The fit may be unsuccessful; .*\"):\n assert (fitparams, fit_info) == fitter._opt_method(mk.MagicMock(), mk.MagicMock(),\n mk.MagicMock(), xtol=xtol)\n assert fit_info == fitter._opt_method.fit_info\n if isinstance(fitter, SLSQPLSQFitter):\n fitter._opt_method.acc == 1e-16\n else:\n fitter._opt_method.acc == xtol\n\n\n@mk.patch.multiple(Optimization, __abstractmethods__=set())\ndef test_Optimization_abstract_call():\n optimization = Optimization(mk.MagicMock())\n with pytest.raises(NotImplementedError) as err:\n optimization()\n assert str(err.value) == \"Subclasses should implement this method\"\n\n\ndef test_fitting_with_outlier_removal_niter():\n \"\"\"\n Test that FittingWithOutlierRemoval stops prior to reaching niter if the\n set of masked points has converged and correctly reports the actual number\n of iterations performed.\n \"\"\"\n\n # 2 rows with some noise around a constant level and 1 deviant point:\n x = np.arange(25)\n with NumpyRNGContext(_RANDOM_SEED):\n y = np.random.normal(loc=10., scale=1., size=(2, 25))\n y[0, 14] = 100.\n\n # Fit 2 models with up to 5 iterations (should only take 2):\n fitter = FittingWithOutlierRemoval(\n fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=5,\n sigma_lower=3., sigma_upper=3., maxiters=1\n )\n model, mask = fitter(models.Chebyshev1D(2, n_models=2), x, y)\n\n # Confirm that only the deviant point was rejected, in 2 iterations:\n assert_equal(np.where(mask), [[0], [14]])\n assert fitter.fit_info['niter'] == 2\n\n # Refit just the first row without any rejection iterations, to ensure\n # there are no regressions for that special case:\n fitter = FittingWithOutlierRemoval(\n fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=0,\n sigma_lower=3., sigma_upper=3., maxiters=1\n )\n model, mask = fitter(models.Chebyshev1D(2), x, y[0])\n\n # Confirm that there were no 
iterations or rejected points:\n assert mask.sum() == 0\n assert fitter.fit_info['niter'] == 0\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\nclass TestFittingUncertanties:\n \"\"\"\n Test that parameter covariance is calculated correctly for the fitters\n that do so (currently LevMarLSQFitter, LinearLSQFitter).\n \"\"\"\n example_1D_models = [models.Polynomial1D(2), models.Linear1D()]\n example_1D_sets = [models.Polynomial1D(2, n_models=2, model_set_axis=False),\n models.Linear1D(n_models=2, slope=[1., 1.], intercept=[0, 0])]\n\n def setup_class(self):\n np.random.seed(619)\n self.x = np.arange(10)\n self.x_grid = np.random.randint(0, 100, size=100).reshape(10, 10)\n self.y_grid = np.random.randint(0, 100, size=100).reshape(10, 10)\n self.rand_grid = np.random.random(100).reshape(10, 10)\n self.rand = self.rand_grid[0]\n\n @pytest.mark.parametrize(('single_model', 'model_set'),\n list(zip(example_1D_models, example_1D_sets)))\n @pytest.mark.parametrize('fitter', non_linear_fitters)\n def test_1d_models(self, single_model, model_set, fitter):\n \"\"\" Test that fitting uncertainties are computed correctly for 1D models\n and 1D model sets. 
Use covariance/stds given by LevMarLSQFitter as\n a benchmark since they are returned by the numpy fitter.\n \"\"\"\n fitter = fitter(calc_uncertainties=True)\n\n linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)\n\n # test 1D single models\n # fit single model w/ nonlinear fitter\n y = single_model(self.x) + self.rand\n with pytest.warns(AstropyUserWarning,\n match=r'Model is linear in parameters'):\n fit_model = fitter(single_model, self.x, y)\n cov_model = fit_model.cov_matrix.cov_matrix\n\n # fit single model w/ linlsq fitter\n fit_model_linlsq = linlsq_fitter(single_model, self.x, y)\n cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix\n\n # check covariance, stds computed correctly computed\n assert_allclose(cov_model_linlsq, cov_model)\n assert_allclose(np.sqrt(np.diag(cov_model_linlsq)),\n fit_model_linlsq.stds.stds)\n\n # now test 1D model sets\n # fit set of models w/ linear fitter\n y = model_set(self.x, model_set_axis=False) + np.array([self.rand, self.rand])\n fit_1d_set_linlsq = linlsq_fitter(model_set, self.x, y)\n cov_1d_set_linlsq = [j.cov_matrix for j in\n fit_1d_set_linlsq.cov_matrix]\n\n # make sure cov matrix from single model fit w/ levmar fitter matches\n # the cov matrix of first model in the set\n assert_allclose(cov_1d_set_linlsq[0], cov_model)\n assert_allclose(np.sqrt(np.diag(cov_1d_set_linlsq[0])),\n fit_1d_set_linlsq.stds[0].stds)\n\n @pytest.mark.parametrize('fitter', non_linear_fitters)\n def test_2d_models(self, fitter):\n \"\"\"\n Test that fitting uncertainties are computed correctly for 2D models\n and 2D model sets. 
Use covariance/stds given by LevMarLSQFitter as\n a benchmark since they are returned by the numpy fitter.\n \"\"\"\n fitter = fitter(calc_uncertainties=True)\n\n linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)\n single_model = models.Polynomial2D(2, c0_0=2)\n model_set = models.Polynomial2D(degree=2, n_models=2, c0_0=[2, 3],\n model_set_axis=False)\n\n # fit single model w/ nonlinear fitter\n z_grid = single_model(self.x_grid, self.y_grid) + self.rand_grid\n with pytest.warns(AstropyUserWarning,\n match=r'Model is linear in parameters'):\n fit_model = fitter(single_model, self.x_grid, self.y_grid, z_grid)\n cov_model = fit_model.cov_matrix.cov_matrix\n\n # fit single model w/ nonlinear fitter\n fit_model_linlsq = linlsq_fitter(single_model, self.x_grid,\n self.y_grid, z_grid)\n cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix\n assert_allclose(cov_model, cov_model_linlsq)\n assert_allclose(np.sqrt(np.diag(cov_model_linlsq)),\n fit_model_linlsq.stds.stds)\n\n # fit 2d model set\n z_grid = model_set(self.x_grid, self.y_grid) + np.array((self.rand_grid,\n self.rand_grid))\n\n fit_2d_set_linlsq = linlsq_fitter(model_set, self.x_grid, self.y_grid,\n z_grid)\n cov_2d_set_linlsq = [j.cov_matrix for j in fit_2d_set_linlsq.cov_matrix]\n\n # make sure cov matrix from single model fit w/ levmar fitter matches\n # the cov matrix of first model in the set\n assert_allclose(cov_2d_set_linlsq[0], cov_model)\n assert_allclose(np.sqrt(np.diag(cov_2d_set_linlsq[0])),\n fit_2d_set_linlsq.stds[0].stds)\n\n def test_covariance_std_printing_indexing(self, capsys):\n \"\"\"\n Test printing methods and indexing.\n \"\"\"\n\n # test str representation for Covariance/stds\n fitter = LinearLSQFitter(calc_uncertainties=True)\n mod = models.Linear1D()\n fit_mod = fitter(mod, self.x, mod(self.x)+self.rand)\n print(fit_mod.cov_matrix)\n captured = capsys.readouterr()\n assert \"slope | 0.001\" in captured.out\n assert \"intercept| -0.005, 0.03\" in captured.out\n\n 
print(fit_mod.stds)\n captured = capsys.readouterr()\n assert \"slope | 0.032\" in captured.out\n assert \"intercept| 0.173\" in captured.out\n\n # test 'pprint' for Covariance/stds\n print(fit_mod.cov_matrix.pprint(round_val=5, max_lines=1))\n captured = capsys.readouterr()\n assert \"slope | 0.00105\" in captured.out\n assert \"intercept\" not in captured.out\n\n print(fit_mod.stds.pprint(max_lines=1, round_val=5))\n captured = capsys.readouterr()\n assert \"slope | 0.03241\" in captured.out\n assert \"intercept\" not in captured.out\n\n # test indexing for Covariance class.\n assert fit_mod.cov_matrix[0, 0] == fit_mod.cov_matrix['slope', 'slope']\n\n # test indexing for stds class.\n assert fit_mod.stds[1] == fit_mod.stds['intercept']\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\n@pytest.mark.parametrize('fitter', non_linear_fitters)\ndef test_non_finite_error(fitter):\n \"\"\"Regression test error introduced to solve issues #3575 and #12809\"\"\"\n\n x = np.array([1, 2, 3, 4, 5, np.nan, 7, np.inf])\n y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, 16])\n\n m_init = models.Gaussian1D()\n fit = fitter()\n\n # Raise warning, notice fit fails due to nans\n with pytest.raises(NonFiniteValueError, match=r\"Objective function has encountered.*\"):\n fit(m_init, x, y)\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\n@pytest.mark.parametrize('fitter', non_linear_fitters)\ndef test_non_finite_filter_1D(fitter):\n \"\"\"Regression test filter introduced to remove non-finte values from data\"\"\"\n\n x = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, np.inf])\n\n m_init = models.Gaussian1D()\n fit = fitter()\n\n with pytest.warns(AstropyUserWarning,\n match=r\"Non-Finite input data has been removed by the fitter\"):\n fit(m_init, x, y, filter_non_finite=True)\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\n@pytest.mark.parametrize('fitter', non_linear_fitters)\ndef test_non_finite_filter_2D(fitter):\n \"\"\"Regression test filter 
introduced to remove non-finte values from data\"\"\"\n\n x, y = np.mgrid[0:10, 0:10]\n\n m_true = models.Gaussian2D(amplitude=1, x_mean=5, y_mean=5, x_stddev=2, y_stddev=2)\n with NumpyRNGContext(_RANDOM_SEED):\n z = m_true(x, y) + np.random.rand(*x.shape)\n z[0, 0] = np.nan\n z[3, 3] = np.inf\n z[7, 5] = -np.inf\n\n m_init = models.Gaussian2D()\n fit = fitter()\n\n with pytest.warns(AstropyUserWarning,\n match=r\"Non-Finite input data has been removed by the fitter\"):\n fit(m_init, x, y, z, filter_non_finite=True)\n"}}},{"rowIdx":1361,"cells":{"hash":{"kind":"string","value":"f28105ee94da23b4d054d8507a92511c971806543c12a91ad6315a4729449efa"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport unittest.mock as mk\n\nimport numpy as np\nimport pytest\n\nimport astropy.units as u\nfrom astropy.coordinates import SpectralCoord\nfrom astropy.modeling.bounding_box import (\n CompoundBoundingBox, ModelBoundingBox, _BaseInterval, _BaseSelectorArgument, _BoundingDomain,\n _ignored_interval, _Interval, _SelectorArgument, _SelectorArguments)\nfrom astropy.modeling.core import Model, fix_inputs\nfrom astropy.modeling.models import Gaussian1D, Gaussian2D, Identity, Polynomial2D, Scale, Shift\n\n\nclass Test_Interval:\n def test_create(self):\n lower = mk.MagicMock()\n upper = mk.MagicMock()\n interval = _Interval(lower, upper)\n assert isinstance(interval, _BaseInterval)\n assert interval.lower == lower\n assert interval.upper == upper\n assert interval == (lower, upper)\n\n assert interval.__repr__() == f\"Interval(lower={lower}, upper={upper})\"\n\n def test_copy(self):\n interval = _Interval(0.5, 1.5)\n copy = interval.copy()\n\n assert interval == copy\n assert id(interval) != id(copy)\n\n # Same float values have will have same id\n assert interval.lower == copy.lower\n assert id(interval.lower) == id(copy.lower)\n\n # Same float values have will have same id\n assert interval.upper == copy.upper\n assert 
id(interval.upper) == id(copy.upper)\n\n def test__validate_shape(self):\n message = \"An interval must be some sort of sequence of length 2\"\n lower = mk.MagicMock()\n upper = mk.MagicMock()\n interval = _Interval(lower, upper)\n\n # Passes (2,)\n interval._validate_shape((1, 2))\n interval._validate_shape([1, 2])\n interval._validate_shape((1*u.m, 2*u.m))\n interval._validate_shape([1*u.m, 2*u.m])\n\n # Passes (1, 2)\n interval._validate_shape(((1, 2),))\n interval._validate_shape(([1, 2],))\n interval._validate_shape([(1, 2)])\n interval._validate_shape([[1, 2]])\n interval._validate_shape(((1*u.m, 2*u.m),))\n interval._validate_shape(([1*u.m, 2*u.m],))\n interval._validate_shape([(1*u.m, 2*u.m)])\n interval._validate_shape([[1*u.m, 2*u.m]])\n\n # Passes (2, 0)\n interval._validate_shape((mk.MagicMock(), mk.MagicMock()))\n interval._validate_shape([mk.MagicMock(), mk.MagicMock()])\n\n # Passes with array inputs:\n interval._validate_shape((np.array([-2.5, -3.5]), np.array([2.5, 3.5])))\n interval._validate_shape((np.array([-2.5, -3.5, -4.5]),\n np.array([2.5, 3.5, 4.5])))\n\n # Fails shape (no units)\n with pytest.raises(ValueError) as err:\n interval._validate_shape((1, 2, 3))\n assert str(err.value) == message\n with pytest.raises(ValueError) as err:\n interval._validate_shape([1, 2, 3])\n assert str(err.value) == message\n with pytest.raises(ValueError) as err:\n interval._validate_shape([[1, 2, 3], [4, 5, 6]])\n assert str(err.value) == message\n with pytest.raises(ValueError) as err:\n interval._validate_shape(1)\n assert str(err.value) == message\n\n # Fails shape (units)\n message = \"An interval must be some sort of sequence of length 2\"\n with pytest.raises(ValueError) as err:\n interval._validate_shape((1*u.m, 2*u.m, 3*u.m))\n assert str(err.value) == message\n with pytest.raises(ValueError) as err:\n interval._validate_shape([1*u.m, 2*u.m, 3*u.m])\n assert str(err.value) == message\n with pytest.raises(ValueError) as err:\n 
interval._validate_shape([[1*u.m, 2*u.m, 3*u.m], [4*u.m, 5*u.m, 6*u.m]])\n assert str(err.value) == message\n with pytest.raises(ValueError) as err:\n interval._validate_shape(1*u.m)\n assert str(err.value) == message\n\n # Fails shape (arrays):\n with pytest.raises(ValueError) as err:\n interval._validate_shape((np.array([-2.5, -3.5]),\n np.array([2.5, 3.5]),\n np.array([3, 4])))\n assert str(err.value) == message\n with pytest.raises(ValueError) as err:\n interval._validate_shape((np.array([-2.5, -3.5]), [2.5, 3.5]))\n assert str(err.value) == message\n\n def test__validate_bounds(self):\n # Passes\n assert _Interval._validate_bounds(1, 2) == (1, 2)\n assert _Interval._validate_bounds(1*u.m, 2*u.m) == (1*u.m, 2*u.m)\n\n interval = _Interval._validate_bounds(np.array([-2.5, -3.5]), np.array([2.5, 3.5]))\n assert (interval.lower == np.array([-2.5, -3.5])).all()\n assert (interval.upper == np.array([2.5, 3.5])).all()\n\n # Fails\n with pytest.warns(RuntimeWarning,\n match=\"Invalid interval: upper bound 1 is strictly \"\n r\"less than lower bound 2\\.\"):\n _Interval._validate_bounds(2, 1)\n with pytest.warns(RuntimeWarning,\n match=r\"Invalid interval: upper bound 1\\.0 m is strictly \"\n r\"less than lower bound 2\\.0 m\\.\"):\n _Interval._validate_bounds(2*u.m, 1*u.m)\n\n def test_validate(self):\n # Passes\n assert _Interval.validate((1, 2)) == (1, 2)\n assert _Interval.validate([1, 2]) == (1, 2)\n assert _Interval.validate((1*u.m, 2*u.m)) == (1*u.m, 2*u.m)\n assert _Interval.validate([1*u.m, 2*u.m]) == (1*u.m, 2*u.m)\n\n assert _Interval.validate(((1, 2),)) == (1, 2)\n assert _Interval.validate(([1, 2],)) == (1, 2)\n assert _Interval.validate([(1, 2)]) == (1, 2)\n assert _Interval.validate([[1, 2]]) == (1, 2)\n assert _Interval.validate(((1*u.m, 2*u.m),)) == (1*u.m, 2*u.m)\n assert _Interval.validate(([1*u.m, 2*u.m],)) == (1*u.m, 2*u.m)\n assert _Interval.validate([(1*u.m, 2*u.m)]) == (1*u.m, 2*u.m)\n assert _Interval.validate([[1*u.m, 2*u.m]]) == (1*u.m, 
2*u.m)\n\n interval = _Interval.validate((np.array([-2.5, -3.5]),\n np.array([2.5, 3.5])))\n assert (interval.lower == np.array([-2.5, -3.5])).all()\n assert (interval.upper == np.array([2.5, 3.5])).all()\n interval = _Interval.validate((np.array([-2.5, -3.5, -4.5]),\n np.array([2.5, 3.5, 4.5])))\n assert (interval.lower == np.array([-2.5, -3.5, -4.5])).all()\n assert (interval.upper == np.array([2.5, 3.5, 4.5])).all()\n\n # Fail shape\n with pytest.raises(ValueError):\n _Interval.validate((1, 2, 3))\n\n # Fail bounds\n with pytest.warns(RuntimeWarning):\n _Interval.validate((2, 1))\n\n def test_outside(self):\n interval = _Interval.validate((0, 1))\n\n assert (interval.outside(np.linspace(-1, 2, 13)) ==\n [True, True, True, True,\n False, False, False, False, False,\n True, True, True, True]).all()\n\n def test_domain(self):\n interval = _Interval.validate((0, 1))\n assert (interval.domain(0.25) == np.linspace(0, 1, 5)).all()\n\n def test__ignored_interval(self):\n assert _ignored_interval.lower == -np.inf\n assert _ignored_interval.upper == np.inf\n\n for num in [0, -1, -100, 3.14, 10**100, -10**100]:\n assert not num < _ignored_interval[0]\n assert num > _ignored_interval[0]\n\n assert not num > _ignored_interval[1]\n assert num < _ignored_interval[1]\n\n assert not (_ignored_interval.outside(np.array([num]))).all()\n\n def test_validate_with_SpectralCoord(self):\n \"\"\"Regression test for issue #12439\"\"\"\n\n lower = SpectralCoord(1, u.um)\n upper = SpectralCoord(10, u.um)\n\n interval = _Interval.validate((lower, upper))\n assert interval.lower == lower\n assert interval.upper == upper\n\n\nclass Test_BoundingDomain:\n def setup(self):\n class BoundingDomain(_BoundingDomain):\n def fix_inputs(self, model, fix_inputs):\n super().fix_inputs(model, fixed_inputs=fix_inputs)\n\n def prepare_inputs(self, input_shape, inputs):\n super().prepare_inputs(input_shape, inputs)\n\n self.BoundingDomain = BoundingDomain\n\n def test_create(self):\n model = 
mk.MagicMock()\n bounding_box = self.BoundingDomain(model)\n assert bounding_box._model == model\n assert bounding_box._ignored == []\n assert bounding_box._order == 'C'\n\n bounding_box = self.BoundingDomain(model, order='F')\n assert bounding_box._model == model\n assert bounding_box._ignored == []\n assert bounding_box._order == 'F'\n\n bounding_box = self.BoundingDomain(Gaussian2D(), ['x'])\n assert bounding_box._ignored == [0]\n assert bounding_box._order == 'C'\n\n # Error\n with pytest.raises(ValueError):\n self.BoundingDomain(model, order=mk.MagicMock())\n\n def test_model(self):\n model = mk.MagicMock()\n bounding_box = self.BoundingDomain(model)\n assert bounding_box._model == model\n assert bounding_box.model == model\n\n def test_order(self):\n bounding_box = self.BoundingDomain(mk.MagicMock(), order='C')\n assert bounding_box._order == 'C'\n assert bounding_box.order == 'C'\n\n bounding_box = self.BoundingDomain(mk.MagicMock(), order='F')\n assert bounding_box._order == 'F'\n assert bounding_box.order == 'F'\n\n bounding_box._order = 'test'\n assert bounding_box.order == 'test'\n\n def test_ignored(self):\n ignored = [0]\n model = mk.MagicMock()\n model.n_inputs = 1\n model.inputs = ['x']\n bounding_box = self.BoundingDomain(model, ignored=ignored)\n\n assert bounding_box._ignored == ignored\n assert bounding_box.ignored == ignored\n\n def test__get_order(self):\n bounding_box = self.BoundingDomain(mk.MagicMock())\n\n # Success (default 'C')\n assert bounding_box._order == 'C'\n assert bounding_box._get_order() == 'C'\n assert bounding_box._get_order('C') == 'C'\n assert bounding_box._get_order('F') == 'F'\n\n # Success (default 'F')\n bounding_box._order = 'F'\n assert bounding_box._order == 'F'\n assert bounding_box._get_order() == 'F'\n assert bounding_box._get_order('C') == 'C'\n assert bounding_box._get_order('F') == 'F'\n\n # Error\n order = mk.MagicMock()\n with pytest.raises(ValueError) as err:\n bounding_box._get_order(order)\n assert 
str(err.value) == (\"order must be either 'C' (C/python order) or \"\n f\"'F' (Fortran/mathematical order), got: {order}.\")\n\n def test__get_index(self):\n bounding_box = self.BoundingDomain(Gaussian2D())\n\n # Pass input name\n assert bounding_box._get_index('x') == 0\n assert bounding_box._get_index('y') == 1\n\n # Pass invalid input name\n with pytest.raises(ValueError) as err:\n bounding_box._get_index('z')\n assert str(err.value) == \"'z' is not one of the inputs: ('x', 'y').\"\n\n # Pass valid index\n assert bounding_box._get_index(0) == 0\n assert bounding_box._get_index(1) == 1\n assert bounding_box._get_index(np.int32(0)) == 0\n assert bounding_box._get_index(np.int32(1)) == 1\n assert bounding_box._get_index(np.int64(0)) == 0\n assert bounding_box._get_index(np.int64(1)) == 1\n\n # Pass invalid index\n MESSAGE = \"Integer key: 2 must be non-negative and < 2.\"\n with pytest.raises(IndexError) as err:\n bounding_box._get_index(2)\n assert str(err.value) == MESSAGE\n with pytest.raises(IndexError) as err:\n bounding_box._get_index(np.int32(2))\n assert str(err.value) == MESSAGE\n with pytest.raises(IndexError) as err:\n bounding_box._get_index(np.int64(2))\n assert str(err.value) == MESSAGE\n with pytest.raises(IndexError) as err:\n bounding_box._get_index(-1)\n assert str(err.value) == \"Integer key: -1 must be non-negative and < 2.\"\n\n # Pass invalid key\n value = mk.MagicMock()\n with pytest.raises(ValueError) as err:\n bounding_box._get_index(value)\n assert str(err.value) == f\"Key value: {value} must be string or integer.\"\n\n def test__get_name(self):\n model = mk.MagicMock()\n model.n_inputs = 1\n model.inputs = ['x']\n bounding_box = self.BoundingDomain(model)\n\n index = mk.MagicMock()\n name = mk.MagicMock()\n model.inputs = mk.MagicMock()\n model.inputs.__getitem__.return_value = name\n assert bounding_box._get_name(index) == name\n assert model.inputs.__getitem__.call_args_list == [mk.call(index)]\n\n def test_ignored_inputs(self):\n model 
= mk.MagicMock()\n ignored = list(range(4, 8))\n model.n_inputs = 8\n model.inputs = [mk.MagicMock() for _ in range(8)]\n bounding_box = self.BoundingDomain(model, ignored=ignored)\n\n inputs = bounding_box.ignored_inputs\n assert isinstance(inputs, list)\n for index, _input in enumerate(inputs):\n assert _input in model.inputs\n assert model.inputs[index + 4] == _input\n for index, _input in enumerate(model.inputs):\n if _input in inputs:\n assert inputs[index - 4] == _input\n else:\n assert index < 4\n\n def test__validate_ignored(self):\n bounding_box = self.BoundingDomain(Gaussian2D())\n\n # Pass\n assert bounding_box._validate_ignored(None) == []\n assert bounding_box._validate_ignored(['x', 'y']) == [0, 1]\n assert bounding_box._validate_ignored([0, 1]) == [0, 1]\n assert bounding_box._validate_ignored([np.int32(0), np.int64(1)]) == [0, 1]\n\n # Fail\n with pytest.raises(ValueError):\n bounding_box._validate_ignored([mk.MagicMock()])\n with pytest.raises(ValueError):\n bounding_box._validate_ignored(['z'])\n with pytest.raises(IndexError):\n bounding_box._validate_ignored([3])\n with pytest.raises(IndexError):\n bounding_box._validate_ignored([np.int32(3)])\n with pytest.raises(IndexError):\n bounding_box._validate_ignored([np.int64(3)])\n\n def test___call__(self):\n bounding_box = self.BoundingDomain(mk.MagicMock())\n\n args = tuple(mk.MagicMock() for _ in range(3))\n kwargs = {f\"test{idx}\": mk.MagicMock() for idx in range(3)}\n\n with pytest.raises(RuntimeError) as err:\n bounding_box(*args, **kwargs)\n assert str(err.value) == (\"This bounding box is fixed by the model and does not have \"\n \"adjustable parameters.\")\n\n def test_fix_inputs(self):\n bounding_box = self.BoundingDomain(mk.MagicMock())\n model = mk.MagicMock()\n fixed_inputs = mk.MagicMock()\n\n with pytest.raises(NotImplementedError) as err:\n bounding_box.fix_inputs(model, fixed_inputs)\n assert str(err.value) == \"This should be implemented by a child class.\"\n\n def 
test__prepare_inputs(self):\n bounding_box = self.BoundingDomain(mk.MagicMock())\n\n with pytest.raises(NotImplementedError) as err:\n bounding_box.prepare_inputs(mk.MagicMock(), mk.MagicMock())\n assert str(err.value) == \"This has not been implemented for BoundingDomain.\"\n\n def test__base_ouput(self):\n bounding_box = self.BoundingDomain(mk.MagicMock())\n\n # Simple shape\n input_shape = (13,)\n output = bounding_box._base_output(input_shape, 0)\n assert (output == 0).all()\n assert output.shape == input_shape\n output = bounding_box._base_output(input_shape, np.nan)\n assert (np.isnan(output)).all()\n assert output.shape == input_shape\n output = bounding_box._base_output(input_shape, 14)\n assert (output == 14).all()\n assert output.shape == input_shape\n\n # Complex shape\n input_shape = (13, 7)\n output = bounding_box._base_output(input_shape, 0)\n assert (output == 0).all()\n assert output.shape == input_shape\n output = bounding_box._base_output(input_shape, np.nan)\n assert (np.isnan(output)).all()\n assert output.shape == input_shape\n output = bounding_box._base_output(input_shape, 14)\n assert (output == 14).all()\n assert output.shape == input_shape\n\n def test__all_out_output(self):\n model = mk.MagicMock()\n bounding_box = self.BoundingDomain(model)\n\n # Simple shape\n model.n_outputs = 1\n input_shape = (13,)\n output, output_unit = bounding_box._all_out_output(input_shape, 0)\n assert (np.array(output) == 0).all()\n assert np.array(output).shape == (1, 13)\n assert output_unit is None\n\n # Complex shape\n model.n_outputs = 6\n input_shape = (13, 7)\n output, output_unit = bounding_box._all_out_output(input_shape, 0)\n assert (np.array(output) == 0).all()\n assert np.array(output).shape == (6, 13, 7)\n assert output_unit is None\n\n def test__modify_output(self):\n bounding_box = self.BoundingDomain(mk.MagicMock())\n valid_index = mk.MagicMock()\n input_shape = mk.MagicMock()\n fill_value = mk.MagicMock()\n\n # Simple shape\n with 
mk.patch.object(_BoundingDomain, '_base_output', autospec=True,\n return_value=np.asanyarray(0)) as mkBase:\n assert (np.array([1, 2, 3]) ==\n bounding_box._modify_output([1, 2, 3], valid_index,\n input_shape, fill_value)).all()\n assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]\n\n # Replacement\n with mk.patch.object(_BoundingDomain, '_base_output', autospec=True,\n return_value=np.array([1, 2, 3, 4, 5, 6])) as mkBase:\n assert (np.array([7, 2, 8, 4, 9, 6]) ==\n bounding_box._modify_output([7, 8, 9], np.array([[0, 2, 4]]),\n input_shape, fill_value)).all()\n assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]\n\n def test__prepare_outputs(self):\n bounding_box = self.BoundingDomain(mk.MagicMock())\n valid_index = mk.MagicMock()\n input_shape = mk.MagicMock()\n fill_value = mk.MagicMock()\n\n valid_outputs = [mk.MagicMock() for _ in range(3)]\n effects = [mk.MagicMock() for _ in range(3)]\n with mk.patch.object(_BoundingDomain, '_modify_output', autospec=True,\n side_effect=effects) as mkModify:\n assert effects == bounding_box._prepare_outputs(valid_outputs, valid_index,\n input_shape, fill_value)\n assert mkModify.call_args_list == [\n mk.call(bounding_box, valid_outputs[idx], valid_index, input_shape, fill_value)\n for idx in range(3)\n ]\n\n def test_prepare_outputs(self):\n model = mk.MagicMock()\n bounding_box = self.BoundingDomain(model)\n\n valid_outputs = mk.MagicMock()\n valid_index = mk.MagicMock()\n input_shape = mk.MagicMock()\n fill_value = mk.MagicMock()\n\n with mk.patch.object(_BoundingDomain, '_prepare_outputs', autospec=True) as mkPrepare:\n # Reshape valid_outputs\n model.n_outputs = 1\n assert mkPrepare.return_value == bounding_box.prepare_outputs(valid_outputs,\n valid_index,\n input_shape,\n fill_value)\n assert mkPrepare.call_args_list == [\n mk.call(bounding_box, [valid_outputs], valid_index, input_shape, fill_value)\n ]\n mkPrepare.reset_mock()\n\n # No reshape valid_outputs\n model.n_outputs = 2\n assert 
mkPrepare.return_value == bounding_box.prepare_outputs(valid_outputs,\n valid_index,\n input_shape,\n fill_value)\n assert mkPrepare.call_args_list == [\n mk.call(bounding_box, valid_outputs, valid_index, input_shape, fill_value)\n ]\n\n def test__get_valid_outputs_unit(self):\n bounding_box = self.BoundingDomain(mk.MagicMock())\n\n # Don't get unit\n assert bounding_box._get_valid_outputs_unit(mk.MagicMock(), False) is None\n\n # Get unit from unitless\n assert bounding_box._get_valid_outputs_unit(7, True) is None\n\n # Get unit\n assert bounding_box._get_valid_outputs_unit(25 * u.m, True) == u.m\n\n def test__evaluate_model(self):\n bounding_box = self.BoundingDomain(mk.MagicMock())\n\n evaluate = mk.MagicMock()\n valid_inputs = mk.MagicMock()\n input_shape = mk.MagicMock()\n valid_index = mk.MagicMock()\n fill_value = mk.MagicMock()\n with_units = mk.MagicMock()\n\n with mk.patch.object(_BoundingDomain, '_get_valid_outputs_unit',\n autospec=True) as mkGet:\n with mk.patch.object(_BoundingDomain, 'prepare_outputs',\n autospec=True) as mkPrepare:\n assert bounding_box._evaluate_model(evaluate, valid_inputs,\n valid_index, input_shape,\n fill_value, with_units) == (\n mkPrepare.return_value,\n mkGet.return_value\n )\n assert mkPrepare.call_args_list == [mk.call(bounding_box, evaluate.return_value,\n valid_index, input_shape, fill_value)]\n assert mkGet.call_args_list == [mk.call(evaluate.return_value, with_units)]\n assert evaluate.call_args_list == [mk.call(valid_inputs)]\n\n def test__evaluate(self):\n bounding_box = self.BoundingDomain(mk.MagicMock())\n\n evaluate = mk.MagicMock()\n inputs = mk.MagicMock()\n input_shape = mk.MagicMock()\n fill_value = mk.MagicMock()\n with_units = mk.MagicMock()\n\n valid_inputs = mk.MagicMock()\n valid_index = mk.MagicMock()\n\n effects = [(valid_inputs, valid_index, True), (valid_inputs, valid_index, False)]\n with mk.patch.object(self.BoundingDomain, 'prepare_inputs', autospec=True,\n side_effect=effects) as mkPrepare:\n with 
mk.patch.object(_BoundingDomain, '_all_out_output',\n autospec=True) as mkAll:\n with mk.patch.object(_BoundingDomain, '_evaluate_model',\n autospec=True) as mkEvaluate:\n # all_out\n assert bounding_box._evaluate(evaluate, inputs, input_shape,\n fill_value, with_units) == mkAll.return_value\n assert mkAll.call_args_list == [mk.call(bounding_box, input_shape, fill_value)]\n assert mkEvaluate.call_args_list == []\n assert mkPrepare.call_args_list == [mk.call(bounding_box, input_shape, inputs)]\n\n mkAll.reset_mock()\n mkPrepare.reset_mock()\n\n # not all_out\n assert bounding_box._evaluate(evaluate, inputs, input_shape,\n fill_value, with_units) == mkEvaluate.return_value\n assert mkAll.call_args_list == []\n assert mkEvaluate.call_args_list == [mk.call(bounding_box, evaluate,\n valid_inputs, valid_index,\n input_shape, fill_value,\n with_units)]\n assert mkPrepare.call_args_list == [mk.call(bounding_box, input_shape, inputs)]\n\n def test__set_outputs_unit(self):\n bounding_box = self.BoundingDomain(mk.MagicMock())\n\n # set no unit\n assert 27 == bounding_box._set_outputs_unit(27, None)\n\n # set unit\n assert 27 * u.m == bounding_box._set_outputs_unit(27, u.m)\n\n def test_evaluate(self):\n bounding_box = self.BoundingDomain(Gaussian2D())\n\n evaluate = mk.MagicMock()\n inputs = mk.MagicMock()\n fill_value = mk.MagicMock()\n\n outputs = mk.MagicMock()\n valid_outputs_unit = mk.MagicMock()\n value = (outputs, valid_outputs_unit)\n with mk.patch.object(_BoundingDomain, '_evaluate',\n autospec=True, return_value=value) as mkEvaluate:\n with mk.patch.object(_BoundingDomain, '_set_outputs_unit',\n autospec=True) as mkSet:\n with mk.patch.object(Model, 'input_shape', autospec=True) as mkShape:\n with mk.patch.object(Model, 'bbox_with_units',\n new_callable=mk.PropertyMock) as mkUnits:\n assert tuple(mkSet.return_value) == bounding_box.evaluate(evaluate, inputs,\n fill_value)\n assert mkSet.call_args_list == [mk.call(outputs, valid_outputs_unit)]\n assert 
mkEvaluate.call_args_list == [mk.call(bounding_box, evaluate, inputs,\n mkShape.return_value,\n fill_value,\n mkUnits.return_value)]\n assert mkShape.call_args_list == [mk.call(bounding_box._model, inputs)]\n assert mkUnits.call_args_list == [mk.call()]\n\n\nclass TestModelBoundingBox:\n def test_create(self):\n intervals = ()\n model = mk.MagicMock()\n bounding_box = ModelBoundingBox(intervals, model)\n\n assert isinstance(bounding_box, _BoundingDomain)\n assert bounding_box._intervals == {}\n assert bounding_box._model == model\n assert bounding_box._ignored == []\n assert bounding_box._order == 'C'\n\n # Set optional\n intervals = {}\n model = mk.MagicMock()\n bounding_box = ModelBoundingBox(intervals, model, order='F')\n\n assert isinstance(bounding_box, _BoundingDomain)\n assert bounding_box._intervals == {}\n assert bounding_box._model == model\n assert bounding_box._ignored == []\n assert bounding_box._order == 'F'\n\n # Set interval\n intervals = (1, 2)\n model = mk.MagicMock()\n model.n_inputs = 1\n model.inputs = ['x']\n bounding_box = ModelBoundingBox(intervals, model)\n\n assert isinstance(bounding_box, _BoundingDomain)\n assert bounding_box._intervals == {0: (1, 2)}\n assert bounding_box._model == model\n\n # Set ignored\n intervals = (1, 2)\n model = mk.MagicMock()\n model.n_inputs = 2\n model.inputs = ['x', 'y']\n bounding_box = ModelBoundingBox(intervals, model, ignored=[1])\n\n assert isinstance(bounding_box, _BoundingDomain)\n assert bounding_box._intervals == {0: (1, 2)}\n assert bounding_box._model == model\n assert bounding_box._ignored == [1]\n\n intervals = ((1, 2), (3, 4))\n model = mk.MagicMock()\n model.n_inputs = 3\n model.inputs = ['x', 'y', 'z']\n bounding_box = ModelBoundingBox(intervals, model, ignored=[2], order='F')\n\n assert isinstance(bounding_box, _BoundingDomain)\n assert bounding_box._intervals == {0: (1, 2), 1: (3, 4)}\n assert bounding_box._model == model\n assert bounding_box._ignored == [2]\n assert bounding_box._order == 
'F'\n\n def test_copy(self):\n bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4.5, 4.5), (-1.4, 1.4)))\n copy = bounding_box.copy()\n\n assert bounding_box == copy\n assert id(bounding_box) != id(copy)\n\n assert bounding_box.ignored == copy.ignored\n assert id(bounding_box.ignored) != id(copy.ignored)\n\n # model is not copied to prevent infinite recursion\n assert bounding_box._model == copy._model\n assert id(bounding_box._model) == id(copy._model)\n\n # Same string values have will have same id\n assert bounding_box._order == copy._order\n assert id(bounding_box._order) == id(copy._order)\n\n # Check interval objects\n for index, interval in bounding_box.intervals.items():\n assert interval == copy.intervals[index]\n assert id(interval) != id(copy.intervals[index])\n\n # Same float values have will have same id\n assert interval.lower == copy.intervals[index].lower\n assert id(interval.lower) == id(copy.intervals[index].lower)\n\n # Same float values have will have same id\n assert interval.upper == copy.intervals[index].upper\n assert id(interval.upper) == id(copy.intervals[index].upper)\n assert len(bounding_box.intervals) == len(copy.intervals)\n assert bounding_box.intervals.keys() == copy.intervals.keys()\n\n def test_intervals(self):\n intervals = {0: _Interval(1, 2)}\n model = mk.MagicMock()\n model.n_inputs = 1\n model.inputs = ['x']\n bounding_box = ModelBoundingBox(intervals, model)\n\n assert bounding_box._intervals == intervals\n assert bounding_box.intervals == intervals\n\n def test_named_intervals(self):\n intervals = {idx: _Interval(idx, idx + 1) for idx in range(4)}\n model = mk.MagicMock()\n model.n_inputs = 4\n model.inputs = [mk.MagicMock() for _ in range(4)]\n bounding_box = ModelBoundingBox(intervals, model)\n\n named = bounding_box.named_intervals\n assert isinstance(named, dict)\n for name, interval in named.items():\n assert name in model.inputs\n assert intervals[model.inputs.index(name)] == interval\n for index, name in 
enumerate(model.inputs):\n assert index in intervals\n assert name in named\n assert intervals[index] == named[name]\n\n def test___repr__(self):\n intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n model = Gaussian2D()\n bounding_box = ModelBoundingBox.validate(model, intervals)\n\n assert bounding_box.__repr__() == (\n \"ModelBoundingBox(\\n\"\n \" intervals={\\n\"\n \" x: Interval(lower=-1, upper=1)\\n\"\n \" y: Interval(lower=-4, upper=4)\\n\"\n \" }\\n\"\n \" model=Gaussian2D(inputs=('x', 'y'))\\n\"\n \" order='C'\\n\"\n \")\"\n )\n\n intervals = {0: _Interval(-1, 1)}\n model = Gaussian2D()\n bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'])\n\n assert bounding_box.__repr__() == (\n \"ModelBoundingBox(\\n\"\n \" intervals={\\n\"\n \" x: Interval(lower=-1, upper=1)\\n\"\n \" }\\n\"\n \" ignored=['y']\\n\"\n \" model=Gaussian2D(inputs=('x', 'y'))\\n\"\n \" order='C'\\n\"\n \")\"\n )\n\n def test___len__(self):\n intervals = {0: _Interval(-1, 1)}\n model = Gaussian1D()\n bounding_box = ModelBoundingBox.validate(model, intervals)\n assert len(bounding_box) == 1 == len(bounding_box._intervals)\n\n intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n model = Gaussian2D()\n bounding_box = ModelBoundingBox.validate(model, intervals)\n assert len(bounding_box) == 2 == len(bounding_box._intervals)\n\n bounding_box._intervals = {}\n assert len(bounding_box) == 0 == len(bounding_box._intervals)\n\n def test___contains__(self):\n intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n model = Gaussian2D()\n bounding_box = ModelBoundingBox.validate(model, intervals)\n\n # Contains with keys\n assert 'x' in bounding_box\n assert 'y' in bounding_box\n assert 'z' not in bounding_box\n\n # Contains with index\n assert 0 in bounding_box\n assert 1 in bounding_box\n assert 2 not in bounding_box\n\n # General not in\n assert mk.MagicMock() not in bounding_box\n\n # Contains with ignored\n del bounding_box['y']\n\n # Contains with keys\n assert 
'x' in bounding_box\n assert 'y' in bounding_box\n assert 'z' not in bounding_box\n\n # Contains with index\n assert 0 in bounding_box\n assert 1 in bounding_box\n assert 2 not in bounding_box\n\n def test___getitem__(self):\n intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n model = Gaussian2D()\n bounding_box = ModelBoundingBox.validate(model, intervals)\n\n # Get using input key\n assert bounding_box['x'] == (-1, 1)\n assert bounding_box['y'] == (-4, 4)\n\n # Fail with input key\n with pytest.raises(ValueError):\n bounding_box['z']\n\n # Get using index\n assert bounding_box[0] == (-1, 1)\n assert bounding_box[1] == (-4, 4)\n assert bounding_box[np.int32(0)] == (-1, 1)\n assert bounding_box[np.int32(1)] == (-4, 4)\n assert bounding_box[np.int64(0)] == (-1, 1)\n assert bounding_box[np.int64(1)] == (-4, 4)\n\n # Fail with index\n with pytest.raises(IndexError):\n bounding_box[2]\n with pytest.raises(IndexError):\n bounding_box[np.int32(2)]\n with pytest.raises(IndexError):\n bounding_box[np.int64(2)]\n\n # get ignored interval\n del bounding_box[0]\n assert bounding_box[0] == _ignored_interval\n assert bounding_box[1] == (-4, 4)\n\n del bounding_box[1]\n assert bounding_box[0] == _ignored_interval\n assert bounding_box[1] == _ignored_interval\n\n def test_bounding_box(self):\n # 0D\n model = Gaussian1D()\n bounding_box = ModelBoundingBox.validate(model, {}, ignored=['x'])\n assert bounding_box.bounding_box() == (-np.inf, np.inf)\n assert bounding_box.bounding_box('C') == (-np.inf, np.inf)\n assert bounding_box.bounding_box('F') == (-np.inf, np.inf)\n\n # 1D\n intervals = {0: _Interval(-1, 1)}\n model = Gaussian1D()\n bounding_box = ModelBoundingBox.validate(model, intervals)\n assert bounding_box.bounding_box() == (-1, 1)\n assert bounding_box.bounding_box(mk.MagicMock()) == (-1, 1)\n\n # > 1D\n intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n model = Gaussian2D()\n bounding_box = ModelBoundingBox.validate(model, intervals)\n assert 
bounding_box.bounding_box() == ((-4, 4), (-1, 1))\n assert bounding_box.bounding_box('C') == ((-4, 4), (-1, 1))\n assert bounding_box.bounding_box('F') == ((-1, 1), (-4, 4))\n\n def test___eq__(self):\n intervals = {0: _Interval(-1, 1)}\n model = Gaussian1D()\n bounding_box = ModelBoundingBox.validate(model.copy(), intervals.copy())\n\n assert bounding_box == bounding_box\n assert bounding_box == ModelBoundingBox.validate(model.copy(), intervals.copy())\n assert bounding_box == (-1, 1)\n\n assert not (bounding_box == mk.MagicMock())\n assert not (bounding_box == (-2, 2))\n assert not (bounding_box == ModelBoundingBox.validate(model, {0: _Interval(-2, 2)}))\n\n # Respect ordering\n intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n model = Gaussian2D()\n bounding_box_1 = ModelBoundingBox.validate(model, intervals)\n bounding_box_2 = ModelBoundingBox.validate(model, intervals, order='F')\n assert bounding_box_1._order == 'C'\n assert bounding_box_1 == ((-4, 4), (-1, 1))\n assert not (bounding_box_1 == ((-1, 1), (-4, 4)))\n\n assert bounding_box_2._order == 'F'\n assert not (bounding_box_2 == ((-4, 4), (-1, 1)))\n assert bounding_box_2 == ((-1, 1), (-4, 4))\n\n assert bounding_box_1 == bounding_box_2\n\n # Respect ignored\n model = Gaussian2D()\n bounding_box_1._ignored = [mk.MagicMock()]\n bounding_box_2._ignored = [mk.MagicMock()]\n assert bounding_box_1._ignored != bounding_box_2._ignored\n assert not (bounding_box_1 == bounding_box_2)\n\n def test__setitem__(self):\n model = Gaussian2D()\n bounding_box = ModelBoundingBox.validate(model, {}, ignored=[0, 1])\n assert bounding_box._ignored == [0, 1]\n\n # USING Intervals directly\n # Set interval using key\n assert 0 not in bounding_box.intervals\n assert 0 in bounding_box.ignored\n bounding_box['x'] = _Interval(-1, 1)\n assert 0 in bounding_box.intervals\n assert 0 not in bounding_box.ignored\n assert isinstance(bounding_box['x'], _Interval)\n assert bounding_box['x'] == (-1, 1)\n\n assert 1 not in 
bounding_box.intervals\n assert 1 in bounding_box.ignored\n bounding_box['y'] = _Interval(-4, 4)\n assert 1 in bounding_box.intervals\n assert 1 not in bounding_box.ignored\n assert isinstance(bounding_box['y'], _Interval)\n assert bounding_box['y'] == (-4, 4)\n\n del bounding_box['x']\n del bounding_box['y']\n\n # Set interval using index\n assert 0 not in bounding_box.intervals\n assert 0 in bounding_box.ignored\n bounding_box[0] = _Interval(-1, 1)\n assert 0 in bounding_box.intervals\n assert 0 not in bounding_box.ignored\n assert isinstance(bounding_box[0], _Interval)\n assert bounding_box[0] == (-1, 1)\n\n assert 1 not in bounding_box.intervals\n assert 1 in bounding_box.ignored\n bounding_box[1] = _Interval(-4, 4)\n assert 1 in bounding_box.intervals\n assert 1 not in bounding_box.ignored\n assert isinstance(bounding_box[1], _Interval)\n assert bounding_box[1] == (-4, 4)\n\n del bounding_box[0]\n del bounding_box[1]\n\n # USING tuples\n # Set interval using key\n assert 0 not in bounding_box.intervals\n assert 0 in bounding_box.ignored\n bounding_box['x'] = (-1, 1)\n assert 0 in bounding_box.intervals\n assert 0 not in bounding_box.ignored\n assert isinstance(bounding_box['x'], _Interval)\n assert bounding_box['x'] == (-1, 1)\n\n assert 1 not in bounding_box.intervals\n assert 1 in bounding_box.ignored\n bounding_box['y'] = (-4, 4)\n assert 1 in bounding_box.intervals\n assert 1 not in bounding_box.ignored\n assert isinstance(bounding_box['y'], _Interval)\n assert bounding_box['y'] == (-4, 4)\n\n del bounding_box['x']\n del bounding_box['y']\n\n # Set interval using index\n assert 0 not in bounding_box.intervals\n assert 0 in bounding_box.ignored\n bounding_box[0] = (-1, 1)\n assert 0 in bounding_box.intervals\n assert 0 not in bounding_box.ignored\n assert isinstance(bounding_box[0], _Interval)\n assert bounding_box[0] == (-1, 1)\n\n assert 1 not in bounding_box.intervals\n assert 1 in bounding_box.ignored\n bounding_box[1] = (-4, 4)\n assert 1 in 
bounding_box.intervals\n assert 1 not in bounding_box.ignored\n assert isinstance(bounding_box[1], _Interval)\n assert bounding_box[1] == (-4, 4)\n\n # Model set support\n model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)\n bounding_box = ModelBoundingBox({}, model)\n # USING Intervals directly\n # Set interval using key\n assert 'x' not in bounding_box\n bounding_box['x'] = _Interval(np.array([-1, -2]), np.array([1, 2]))\n assert 'x' in bounding_box\n assert isinstance(bounding_box['x'], _Interval)\n assert (bounding_box['x'].lower == np.array([-1, -2])).all()\n assert (bounding_box['x'].upper == np.array([1, 2])).all()\n # Set interval using index\n bounding_box._intervals = {}\n assert 0 not in bounding_box\n bounding_box[0] = _Interval(np.array([-1, -2]), np.array([1, 2]))\n assert 0 in bounding_box\n assert isinstance(bounding_box[0], _Interval)\n assert (bounding_box[0].lower == np.array([-1, -2])).all()\n assert (bounding_box[0].upper == np.array([1, 2])).all()\n # USING tuples\n # Set interval using key\n bounding_box._intervals = {}\n assert 'x' not in bounding_box\n bounding_box['x'] = (np.array([-1, -2]), np.array([1, 2]))\n assert 'x' in bounding_box\n assert isinstance(bounding_box['x'], _Interval)\n assert (bounding_box['x'].lower == np.array([-1, -2])).all()\n assert (bounding_box['x'].upper == np.array([1, 2])).all()\n # Set interval using index\n bounding_box._intervals = {}\n assert 0 not in bounding_box\n bounding_box[0] = (np.array([-1, -2]), np.array([1, 2]))\n assert 0 in bounding_box\n assert isinstance(bounding_box[0], _Interval)\n assert (bounding_box[0].lower == np.array([-1, -2])).all()\n assert (bounding_box[0].upper == np.array([1, 2])).all()\n\n def test___delitem__(self):\n intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n model = Gaussian2D()\n bounding_box = ModelBoundingBox.validate(model, intervals)\n\n # Using index\n assert 0 in bounding_box.intervals\n assert 0 not in bounding_box.ignored\n assert 0 in 
bounding_box\n assert 'x' in bounding_box\n del bounding_box[0]\n assert 0 not in bounding_box.intervals\n assert 0 in bounding_box.ignored\n assert 0 in bounding_box\n assert 'x' in bounding_box\n\n # Delete an ignored item\n with pytest.raises(RuntimeError) as err:\n del bounding_box[0]\n assert str(err.value) == \"Cannot delete ignored input: 0!\"\n\n # Using key\n assert 1 in bounding_box.intervals\n assert 1 not in bounding_box.ignored\n assert 0 in bounding_box\n assert 'y' in bounding_box\n del bounding_box['y']\n assert 1 not in bounding_box.intervals\n assert 1 in bounding_box.ignored\n assert 0 in bounding_box\n assert 'y' in bounding_box\n\n # Delete an ignored item\n with pytest.raises(RuntimeError) as err:\n del bounding_box['y']\n assert str(err.value) == \"Cannot delete ignored input: y!\"\n\n def test__validate_dict(self):\n model = Gaussian2D()\n bounding_box = ModelBoundingBox({}, model)\n\n # Input name keys\n intervals = {'x': _Interval(-1, 1), 'y': _Interval(-4, 4)}\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n bounding_box._validate_dict(intervals)\n assert 'x' in bounding_box\n assert bounding_box['x'] == (-1, 1)\n assert 'y' in bounding_box\n assert bounding_box['y'] == (-4, 4)\n assert len(bounding_box.intervals) == 2\n\n # Input index\n bounding_box._intervals = {}\n intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n assert 0 not in bounding_box\n assert 1 not in bounding_box\n bounding_box._validate_dict(intervals)\n assert 0 in bounding_box\n assert bounding_box[0] == (-1, 1)\n assert 1 in bounding_box\n assert bounding_box[1] == (-4, 4)\n assert len(bounding_box.intervals) == 2\n\n # Model set support\n model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)\n bounding_box = ModelBoundingBox({}, model)\n # name keys\n intervals = {'x': _Interval(np.array([-1, -2]), np.array([1, 2]))}\n assert 'x' not in bounding_box\n bounding_box._validate_dict(intervals)\n assert 'x' in bounding_box\n assert 
(bounding_box['x'].lower == np.array([-1, -2])).all()\n assert (bounding_box['x'].upper == np.array([1, 2])).all()\n # input index\n bounding_box._intervals = {}\n intervals = {0: _Interval(np.array([-1, -2]), np.array([1, 2]))}\n assert 0 not in bounding_box\n bounding_box._validate_dict(intervals)\n assert 0 in bounding_box\n assert (bounding_box[0].lower == np.array([-1, -2])).all()\n assert (bounding_box[0].upper == np.array([1, 2])).all()\n\n def test__validate_sequence(self):\n model = Gaussian2D()\n bounding_box = ModelBoundingBox({}, model)\n\n # Default order\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n bounding_box._validate_sequence(((-4, 4), (-1, 1)))\n assert 'x' in bounding_box\n assert bounding_box['x'] == (-1, 1)\n assert 'y' in bounding_box\n assert bounding_box['y'] == (-4, 4)\n assert len(bounding_box.intervals) == 2\n\n # C order\n bounding_box._intervals = {}\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n bounding_box._validate_sequence(((-4, 4), (-1, 1)), order='C')\n assert 'x' in bounding_box\n assert bounding_box['x'] == (-1, 1)\n assert 'y' in bounding_box\n assert bounding_box['y'] == (-4, 4)\n assert len(bounding_box.intervals) == 2\n\n # Fortran order\n bounding_box._intervals = {}\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n bounding_box._validate_sequence(((-4, 4), (-1, 1)), order='F')\n assert 'x' in bounding_box\n assert bounding_box['x'] == (-4, 4)\n assert 'y' in bounding_box\n assert bounding_box['y'] == (-1, 1)\n assert len(bounding_box.intervals) == 2\n\n # Invalid order\n bounding_box._intervals = {}\n order = mk.MagicMock()\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n with pytest.raises(ValueError):\n bounding_box._validate_sequence(((-4, 4), (-1, 1)), order=order)\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n assert len(bounding_box.intervals) == 0\n\n def test__n_inputs(self):\n model = Gaussian2D()\n\n intervals 
= {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n bounding_box = ModelBoundingBox.validate(model, intervals)\n assert bounding_box._n_inputs == 2\n\n intervals = {0: _Interval(-1, 1)}\n bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'])\n assert bounding_box._n_inputs == 1\n\n bounding_box = ModelBoundingBox.validate(model, {}, ignored=['x', 'y'])\n assert bounding_box._n_inputs == 0\n\n bounding_box._ignored = ['x', 'y', 'z']\n assert bounding_box._n_inputs == 0\n\n def test__validate_iterable(self):\n model = Gaussian2D()\n bounding_box = ModelBoundingBox({}, model)\n\n # Pass sequence Default order\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n bounding_box._validate_iterable(((-4, 4), (-1, 1)))\n assert 'x' in bounding_box\n assert bounding_box['x'] == (-1, 1)\n assert 'y' in bounding_box\n assert bounding_box['y'] == (-4, 4)\n assert len(bounding_box.intervals) == 2\n\n # Pass sequence\n bounding_box._intervals = {}\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n bounding_box._validate_iterable(((-4, 4), (-1, 1)), order='F')\n assert 'x' in bounding_box\n assert bounding_box['x'] == (-4, 4)\n assert 'y' in bounding_box\n assert bounding_box['y'] == (-1, 1)\n assert len(bounding_box.intervals) == 2\n\n # Pass Dict\n bounding_box._intervals = {}\n intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n assert 0 not in bounding_box\n assert 1 not in bounding_box\n bounding_box._validate_iterable(intervals)\n assert 0 in bounding_box\n assert bounding_box[0] == (-1, 1)\n assert 1 in bounding_box\n assert bounding_box[1] == (-4, 4)\n assert len(bounding_box.intervals) == 2\n\n # Pass with ignored\n bounding_box._intervals = {}\n bounding_box._ignored = [1]\n intervals = {0: _Interval(-1, 1)}\n assert 0 not in bounding_box.intervals\n bounding_box._validate_iterable(intervals)\n assert 0 in bounding_box.intervals\n assert bounding_box[0] == (-1, 1)\n\n # Invalid iterable\n bounding_box._intervals = {}\n 
bounding_box._ignored = []\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n with pytest.raises(ValueError) as err:\n bounding_box._validate_iterable(((-4, 4), (-1, 1), (-3, 3)))\n assert str(err.value) == \"Found 3 intervals, but must have exactly 2.\"\n assert len(bounding_box.intervals) == 0\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n bounding_box._ignored = [1]\n intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n with pytest.raises(ValueError) as err:\n bounding_box._validate_iterable(intervals)\n assert str(err.value) == \"Found 2 intervals, but must have exactly 1.\"\n assert len(bounding_box.intervals) == 0\n bounding_box._ignored = []\n intervals = {0: _Interval(-1, 1)}\n with pytest.raises(ValueError) as err:\n bounding_box._validate_iterable(intervals)\n assert str(err.value) == \"Found 1 intervals, but must have exactly 2.\"\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n assert len(bounding_box.intervals) == 0\n\n def test__validate(self):\n model = Gaussian2D()\n bounding_box = ModelBoundingBox({}, model)\n\n # Pass sequence Default order\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n bounding_box._validate(((-4, 4), (-1, 1)))\n assert 'x' in bounding_box\n assert bounding_box['x'] == (-1, 1)\n assert 'y' in bounding_box\n assert bounding_box['y'] == (-4, 4)\n assert len(bounding_box.intervals) == 2\n\n # Pass sequence\n bounding_box._intervals = {}\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n bounding_box._validate(((-4, 4), (-1, 1)), order='F')\n assert 'x' in bounding_box\n assert bounding_box['x'] == (-4, 4)\n assert 'y' in bounding_box\n assert bounding_box['y'] == (-1, 1)\n assert len(bounding_box.intervals) == 2\n\n # Pass Dict\n bounding_box._intervals = {}\n intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n assert 'x' not in bounding_box\n assert 'y' not in bounding_box\n bounding_box._validate(intervals)\n assert 0 in 
bounding_box\n assert bounding_box[0] == (-1, 1)\n assert 1 in bounding_box\n assert bounding_box[1] == (-4, 4)\n assert len(bounding_box.intervals) == 2\n\n # Pass single with ignored\n intervals = {0: _Interval(-1, 1)}\n bounding_box = ModelBoundingBox({}, model, ignored=[1])\n\n assert 0 not in bounding_box.intervals\n assert 1 not in bounding_box.intervals\n bounding_box._validate(intervals)\n assert 0 in bounding_box.intervals\n assert bounding_box[0] == (-1, 1)\n assert 1 not in bounding_box.intervals\n assert len(bounding_box.intervals) == 1\n\n # Pass single\n model = Gaussian1D()\n bounding_box = ModelBoundingBox({}, model)\n\n assert 'x' not in bounding_box\n bounding_box._validate((-1, 1))\n assert 'x' in bounding_box\n assert bounding_box['x'] == (-1, 1)\n assert len(bounding_box.intervals) == 1\n\n # Model set support\n model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)\n bounding_box = ModelBoundingBox({}, model)\n sequence = (np.array([-1, -2]), np.array([1, 2]))\n assert 'x' not in bounding_box\n bounding_box._validate(sequence)\n assert 'x' in bounding_box\n assert (bounding_box['x'].lower == np.array([-1, -2])).all()\n assert (bounding_box['x'].upper == np.array([1, 2])).all()\n\n def test_validate(self):\n model = Gaussian2D()\n kwargs = {'test': mk.MagicMock()}\n\n # Pass sequence Default order\n bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), **kwargs)\n assert (bounding_box._model.parameters == model.parameters).all()\n assert 'x' in bounding_box\n assert bounding_box['x'] == (-1, 1)\n assert 'y' in bounding_box\n assert bounding_box['y'] == (-4, 4)\n assert len(bounding_box.intervals) == 2\n\n # Pass sequence\n bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), order='F', **kwargs)\n assert (bounding_box._model.parameters == model.parameters).all()\n assert 'x' in bounding_box\n assert bounding_box['x'] == (-4, 4)\n assert 'y' in bounding_box\n assert bounding_box['y'] == (-1, 1)\n assert 
len(bounding_box.intervals) == 2\n\n # Pass Dict\n intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n bounding_box = ModelBoundingBox.validate(model, intervals, order='F', **kwargs)\n assert (bounding_box._model.parameters == model.parameters).all()\n assert 0 in bounding_box\n assert bounding_box[0] == (-1, 1)\n assert 1 in bounding_box\n assert bounding_box[1] == (-4, 4)\n assert len(bounding_box.intervals) == 2\n assert bounding_box.order == 'F'\n\n # Pass ModelBoundingBox\n bbox = bounding_box\n bounding_box = ModelBoundingBox.validate(model, bbox, **kwargs)\n assert (bounding_box._model.parameters == model.parameters).all()\n assert 0 in bounding_box\n assert bounding_box[0] == (-1, 1)\n assert 1 in bounding_box\n assert bounding_box[1] == (-4, 4)\n assert len(bounding_box.intervals) == 2\n assert bounding_box.order == 'F'\n\n # Pass single ignored\n intervals = {0: _Interval(-1, 1)}\n bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'], **kwargs)\n assert (bounding_box._model.parameters == model.parameters).all()\n assert 'x' in bounding_box\n assert bounding_box['x'] == (-1, 1)\n assert 'y' in bounding_box\n assert bounding_box['y'] == _ignored_interval\n assert len(bounding_box.intervals) == 1\n\n # Pass single\n bounding_box = ModelBoundingBox.validate(Gaussian1D(), (-1, 1), **kwargs)\n assert (bounding_box._model.parameters == Gaussian1D().parameters).all()\n assert 'x' in bounding_box\n assert bounding_box['x'] == (-1, 1)\n assert len(bounding_box.intervals) == 1\n\n # Model set support\n model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)\n sequence = (np.array([-1, -2]), np.array([1, 2]))\n bounding_box = ModelBoundingBox.validate(model, sequence, **kwargs)\n assert 'x' in bounding_box\n assert (bounding_box['x'].lower == np.array([-1, -2])).all()\n assert (bounding_box['x'].upper == np.array([1, 2])).all()\n\n def test_fix_inputs(self):\n bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4, 4), (-1, 1)))\n\n # 
keep_ignored = False (default)\n new_bounding_box = bounding_box.fix_inputs(Gaussian1D(), {1: mk.MagicMock()})\n assert not (bounding_box == new_bounding_box)\n\n assert (new_bounding_box._model.parameters == Gaussian1D().parameters).all()\n assert 'x' in new_bounding_box\n assert new_bounding_box['x'] == (-1, 1)\n assert 'y' not in new_bounding_box\n assert len(new_bounding_box.intervals) == 1\n assert new_bounding_box.ignored == []\n\n # keep_ignored = True\n new_bounding_box = bounding_box.fix_inputs(Gaussian2D(), {1: mk.MagicMock()},\n _keep_ignored=True)\n assert not (bounding_box == new_bounding_box)\n\n assert (new_bounding_box._model.parameters == Gaussian2D().parameters).all()\n assert 'x' in new_bounding_box\n assert new_bounding_box['x'] == (-1, 1)\n assert 'y' in new_bounding_box\n assert 'y' in new_bounding_box.ignored_inputs\n assert len(new_bounding_box.intervals) == 1\n assert new_bounding_box.ignored == [1]\n\n def test_dimension(self):\n intervals = {0: _Interval(-1, 1)}\n model = Gaussian1D()\n bounding_box = ModelBoundingBox.validate(model, intervals)\n assert bounding_box.dimension == 1 == len(bounding_box._intervals)\n\n intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}\n model = Gaussian2D()\n bounding_box = ModelBoundingBox.validate(model, intervals)\n assert bounding_box.dimension == 2 == len(bounding_box._intervals)\n\n bounding_box._intervals = {}\n assert bounding_box.dimension == 0 == len(bounding_box._intervals)\n\n def test_domain(self):\n intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}\n model = Gaussian2D()\n bounding_box = ModelBoundingBox.validate(model, intervals)\n\n # test defaults\n assert (np.array(bounding_box.domain(0.25)) ==\n np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])).all()\n\n # test C order\n assert (np.array(bounding_box.domain(0.25, 'C')) ==\n np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])).all()\n\n # test Fortran order\n assert (np.array(bounding_box.domain(0.25, 'F')) ==\n 
np.array([np.linspace(-1, 1, 9), np.linspace(0, 2, 9)])).all()\n\n # test error order\n order = mk.MagicMock()\n with pytest.raises(ValueError):\n bounding_box.domain(0.25, order)\n\n def test__outside(self):\n intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}\n model = Gaussian2D()\n bounding_box = ModelBoundingBox.validate(model, intervals)\n\n # Normal array input, all inside\n x = np.linspace(-1, 1, 13)\n y = np.linspace(0, 2, 13)\n input_shape = x.shape\n inputs = (x, y)\n outside_index, all_out = bounding_box._outside(input_shape, inputs)\n assert (outside_index == [False for _ in range(13)]).all()\n assert not all_out and isinstance(all_out, bool)\n\n # Normal array input, some inside and some outside\n x = np.linspace(-2, 1, 13)\n y = np.linspace(0, 3, 13)\n input_shape = x.shape\n inputs = (x, y)\n outside_index, all_out = bounding_box._outside(input_shape, inputs)\n assert (outside_index ==\n [True, True, True, True,\n False, False, False, False, False,\n True, True, True, True]).all()\n assert not all_out and isinstance(all_out, bool)\n\n # Normal array input, all outside\n x = np.linspace(2, 3, 13)\n y = np.linspace(-2, -1, 13)\n input_shape = x.shape\n inputs = (x, y)\n outside_index, all_out = bounding_box._outside(input_shape, inputs)\n assert (outside_index == [True for _ in range(13)]).all()\n assert all_out and isinstance(all_out, bool)\n\n # Scalar input inside bounding_box\n inputs = (0.5, 0.5)\n input_shape = (1,)\n outside_index, all_out = bounding_box._outside(input_shape, inputs)\n assert (outside_index == [False]).all()\n assert not all_out and isinstance(all_out, bool)\n\n # Scalar input outside bounding_box\n inputs = (2, -1)\n input_shape = (1,)\n outside_index, all_out = bounding_box._outside(input_shape, inputs)\n assert (outside_index == [True]).all()\n assert all_out and isinstance(all_out, bool)\n\n def test__valid_index(self):\n intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}\n model = Gaussian2D()\n bounding_box = 
ModelBoundingBox.validate(model, intervals)\n\n # Normal array input, all inside\n x = np.linspace(-1, 1, 13)\n y = np.linspace(0, 2, 13)\n input_shape = x.shape\n inputs = (x, y)\n valid_index, all_out = bounding_box._valid_index(input_shape, inputs)\n assert len(valid_index) == 1\n assert (valid_index[0] == [idx for idx in range(13)]).all()\n assert not all_out and isinstance(all_out, bool)\n\n # Normal array input, some inside and some outside\n x = np.linspace(-2, 1, 13)\n y = np.linspace(0, 3, 13)\n input_shape = x.shape\n inputs = (x, y)\n valid_index, all_out = bounding_box._valid_index(input_shape, inputs)\n assert len(valid_index) == 1\n assert (valid_index[0] == [4, 5, 6, 7, 8]).all()\n assert not all_out and isinstance(all_out, bool)\n\n # Normal array input, all outside\n x = np.linspace(2, 3, 13)\n y = np.linspace(-2, -1, 13)\n input_shape = x.shape\n inputs = (x, y)\n valid_index, all_out = bounding_box._valid_index(input_shape, inputs)\n assert len(valid_index) == 1\n assert (valid_index[0] == []).all()\n assert all_out and isinstance(all_out, bool)\n\n # Scalar input inside bounding_box\n inputs = (0.5, 0.5)\n input_shape = (1,)\n valid_index, all_out = bounding_box._valid_index(input_shape, inputs)\n assert len(valid_index) == 1\n assert (valid_index[0] == [0]).all()\n assert not all_out and isinstance(all_out, bool)\n\n # Scalar input outside bounding_box\n inputs = (2, -1)\n input_shape = (1,)\n valid_index, all_out = bounding_box._valid_index(input_shape, inputs)\n assert len(valid_index) == 1\n assert (valid_index[0] == []).all()\n assert all_out and isinstance(all_out, bool)\n\n def test_prepare_inputs(self):\n intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}\n model = Gaussian2D()\n bounding_box = ModelBoundingBox.validate(model, intervals)\n\n # Normal array input, all inside\n x = np.linspace(-1, 1, 13)\n y = np.linspace(0, 2, 13)\n input_shape = x.shape\n inputs = (x, y)\n new_inputs, valid_index, all_out = 
bounding_box.prepare_inputs(input_shape, inputs)\n assert (np.array(new_inputs) == np.array(inputs)).all()\n assert len(valid_index) == 1\n assert (valid_index[0] == [idx for idx in range(13)]).all()\n assert not all_out and isinstance(all_out, bool)\n\n # Normal array input, some inside and some outside\n x = np.linspace(-2, 1, 13)\n y = np.linspace(0, 3, 13)\n input_shape = x.shape\n inputs = (x, y)\n new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)\n assert (np.array(new_inputs) ==\n np.array(\n [\n [x[4], x[5], x[6], x[7], x[8]],\n [y[4], y[5], y[6], y[7], y[8]],\n ]\n )).all()\n assert len(valid_index) == 1\n assert (valid_index[0] == [4, 5, 6, 7, 8]).all()\n assert not all_out and isinstance(all_out, bool)\n\n # Normal array input, all outside\n x = np.linspace(2, 3, 13)\n y = np.linspace(-2, -1, 13)\n input_shape = x.shape\n inputs = (x, y)\n new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)\n assert new_inputs == ()\n assert len(valid_index) == 1\n assert (valid_index[0] == []).all()\n assert all_out and isinstance(all_out, bool)\n\n # Scalar input inside bounding_box\n inputs = (0.5, 0.5)\n input_shape = (1,)\n new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)\n assert (np.array(new_inputs) == np.array([[0.5], [0.5]])).all()\n assert len(valid_index) == 1\n assert (valid_index[0] == [0]).all()\n assert not all_out and isinstance(all_out, bool)\n\n # Scalar input outside bounding_box\n inputs = (2, -1)\n input_shape = (1,)\n new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)\n assert new_inputs == ()\n assert len(valid_index) == 1\n assert (valid_index[0] == []).all()\n assert all_out and isinstance(all_out, bool)\n\n def test_bounding_box_ignore(self):\n \"\"\"Regression test for #13028\"\"\"\n\n bbox_x = ModelBoundingBox((9, 10), Polynomial2D(1), ignored=[\"x\"])\n assert bbox_x.ignored_inputs == ['x']\n\n bbox_y = 
ModelBoundingBox((11, 12), Polynomial2D(1), ignored=[\"y\"])\n assert bbox_y.ignored_inputs == ['y']\n\n\nclass Test_SelectorArgument:\n def test_create(self):\n index = mk.MagicMock()\n ignore = mk.MagicMock()\n argument = _SelectorArgument(index, ignore)\n\n assert isinstance(argument, _BaseSelectorArgument)\n assert argument.index == index\n assert argument.ignore == ignore\n assert argument == (index, ignore)\n\n def test_validate(self):\n model = Gaussian2D()\n\n # default integer\n assert _SelectorArgument.validate(model, 0) == (0, True)\n assert _SelectorArgument.validate(model, 1) == (1, True)\n\n # default string\n assert _SelectorArgument.validate(model, 'x') == (0, True)\n assert _SelectorArgument.validate(model, 'y') == (1, True)\n\n ignore = mk.MagicMock()\n # non-default integer\n assert _SelectorArgument.validate(model, 0, ignore) == (0, ignore)\n assert _SelectorArgument.validate(model, 1, ignore) == (1, ignore)\n\n # non-default string\n assert _SelectorArgument.validate(model, 'x', ignore) == (0, ignore)\n assert _SelectorArgument.validate(model, 'y', ignore) == (1, ignore)\n\n # Fail\n with pytest.raises(ValueError):\n _SelectorArgument.validate(model, 'z')\n with pytest.raises(ValueError):\n _SelectorArgument.validate(model, mk.MagicMock())\n with pytest.raises(IndexError):\n _SelectorArgument.validate(model, 2)\n\n def test_get_selector(self):\n # single inputs\n inputs = [idx + 17 for idx in range(3)]\n for index in range(3):\n assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]\n\n # numpy array of single inputs\n inputs = [np.array([idx + 11]) for idx in range(3)]\n for index in range(3):\n assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]\n inputs = [np.asanyarray(idx + 13) for idx in range(3)]\n for index in range(3):\n assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]\n\n # multi entry numpy array\n inputs = [np.array([idx + 27, 
idx - 31]) for idx in range(3)]\n for index in range(3):\n assert _SelectorArgument(index,\n mk.MagicMock()).get_selector(*inputs) == tuple(inputs[index])\n\n def test_name(self):\n model = Gaussian2D()\n for index in range(model.n_inputs):\n assert _SelectorArgument(index, mk.MagicMock()).name(model) == model.inputs[index]\n\n def test_pretty_repr(self):\n model = Gaussian2D()\n\n assert _SelectorArgument(0, False).pretty_repr(model) == \"Argument(name='x', ignore=False)\"\n assert _SelectorArgument(0, True).pretty_repr(model) == \"Argument(name='x', ignore=True)\"\n assert _SelectorArgument(1, False).pretty_repr(model) == \"Argument(name='y', ignore=False)\"\n assert _SelectorArgument(1, True).pretty_repr(model) == \"Argument(name='y', ignore=True)\"\n\n def test_get_fixed_value(self):\n model = Gaussian2D()\n values = {0: 5, 'y': 7}\n\n # Get index value\n assert _SelectorArgument(0, mk.MagicMock()).get_fixed_value(model, values) == 5\n\n # Get name value\n assert _SelectorArgument(1, mk.MagicMock()).get_fixed_value(model, values) == 7\n\n # Fail\n values = {0: 5}\n with pytest.raises(RuntimeError) as err:\n _SelectorArgument(1, True).get_fixed_value(model, values)\n assert str(err.value) == \"Argument(name='y', ignore=True) was not found in {0: 5}\"\n\n def test_is_argument(self):\n model = Gaussian2D()\n argument = _SelectorArgument.validate(model, 0)\n\n # Is true\n assert argument.is_argument(model, 0) is True\n assert argument.is_argument(model, 'x') is True\n\n # Is false\n assert argument.is_argument(model, 1) is False\n assert argument.is_argument(model, 'y') is False\n\n # Fail\n with pytest.raises(ValueError):\n argument.is_argument(model, 'z')\n with pytest.raises(ValueError):\n argument.is_argument(model, mk.MagicMock())\n with pytest.raises(IndexError):\n argument.is_argument(model, 2)\n\n def test_named_tuple(self):\n model = Gaussian2D()\n for index in range(model.n_inputs):\n ignore = mk.MagicMock()\n assert _SelectorArgument(index, 
ignore).named_tuple(model) == (model.inputs[index],\n ignore)\n\n\nclass Test_SelectorArguments:\n def test_create(self):\n arguments = _SelectorArguments((_SelectorArgument(0, True), _SelectorArgument(1, False)))\n assert isinstance(arguments, _SelectorArguments)\n assert arguments == ((0, True), (1, False))\n assert arguments._kept_ignore == []\n\n kept_ignore = mk.MagicMock()\n arguments = _SelectorArguments((_SelectorArgument(0, True),\n _SelectorArgument(1, False)), kept_ignore)\n assert isinstance(arguments, _SelectorArguments)\n assert arguments == ((0, True), (1, False))\n assert arguments._kept_ignore == kept_ignore\n\n def test_pretty_repr(self):\n model = Gaussian2D()\n arguments = _SelectorArguments((_SelectorArgument(0, True), _SelectorArgument(1, False)))\n\n assert arguments.pretty_repr(model) == (\n \"SelectorArguments(\\n\"\n \" Argument(name='x', ignore=True)\\n\"\n \" Argument(name='y', ignore=False)\\n\"\n \")\"\n )\n\n def test_ignore(self):\n assert _SelectorArguments((_SelectorArgument(0, True),\n _SelectorArgument(1, True))).ignore == [0, 1]\n assert _SelectorArguments((_SelectorArgument(0, True),\n _SelectorArgument(1, True)), [13, 4]).ignore == [0, 1, 13, 4]\n assert _SelectorArguments((_SelectorArgument(0, True),\n _SelectorArgument(1, False))).ignore == [0]\n assert _SelectorArguments((_SelectorArgument(0, False),\n _SelectorArgument(1, True))).ignore == [1]\n assert _SelectorArguments((_SelectorArgument(0, False),\n _SelectorArgument(1, False))).ignore == []\n assert _SelectorArguments((_SelectorArgument(0, False),\n _SelectorArgument(1, False)), [17, 14]).ignore == [17, 14]\n\n def test_validate(self):\n # Integer key and passed ignore\n arguments = _SelectorArguments.validate(Gaussian2D(), ((0, True), (1, False)))\n assert isinstance(arguments, _SelectorArguments)\n assert arguments == ((0, True), (1, False))\n assert arguments.kept_ignore == []\n\n # Default ignore\n arguments = _SelectorArguments.validate(Gaussian2D(), ((0,), 
(1,)))\n assert isinstance(arguments, _SelectorArguments)\n assert arguments == ((0, True), (1, True))\n assert arguments.kept_ignore == []\n\n # String key and passed ignore\n arguments = _SelectorArguments.validate(Gaussian2D(), (('x', True), ('y', False)))\n assert isinstance(arguments, _SelectorArguments)\n assert arguments == ((0, True), (1, False))\n assert arguments.kept_ignore == []\n\n # Test kept_ignore option\n new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments, [11, 5, 8])\n assert isinstance(new_arguments, _SelectorArguments)\n assert new_arguments == ((0, True), (1, False))\n assert new_arguments.kept_ignore == [11, 5, 8]\n\n arguments._kept_ignore = [13, 17, 14]\n new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments)\n assert isinstance(new_arguments, _SelectorArguments)\n assert new_arguments == ((0, True), (1, False))\n assert new_arguments.kept_ignore == [13, 17, 14]\n\n # Invalid, bad argument\n with pytest.raises(ValueError):\n _SelectorArguments.validate(Gaussian2D(), ((0, True), ('z', False)))\n with pytest.raises(ValueError):\n _SelectorArguments.validate(Gaussian2D(), ((mk.MagicMock(), True), (1, False)))\n with pytest.raises(IndexError):\n _SelectorArguments.validate(Gaussian2D(), ((0, True), (2, False)))\n\n # Invalid, repeated argument\n with pytest.raises(ValueError) as err:\n _SelectorArguments.validate(Gaussian2D(), ((0, True), (0, False)))\n assert str(err.value) == \"Input: 'x' has been repeated.\"\n\n # Invalid, no arguments\n with pytest.raises(ValueError) as err:\n _SelectorArguments.validate(Gaussian2D(), ())\n assert str(err.value) == \"There must be at least one selector argument.\"\n\n def test_get_selector(self):\n inputs = [idx + 19 for idx in range(4)]\n\n assert _SelectorArguments.validate(Gaussian2D(),\n ((0, True),\n (1, False))).get_selector(*inputs) == tuple(inputs[:2])\n assert _SelectorArguments.validate(Gaussian2D(),\n ((1, True),\n (0, False))).get_selector(*inputs) == 
tuple(inputs[:2][::-1]) # noqa: E501\n assert _SelectorArguments.validate(Gaussian2D(),\n ((1, False),)).get_selector(*inputs) == (inputs[1],)\n assert _SelectorArguments.validate(Gaussian2D(),\n ((0, True),)).get_selector(*inputs) == (inputs[0],)\n\n def test_is_selector(self):\n # Is Selector\n assert _SelectorArguments.validate(Gaussian2D(),\n ((0, True), (1, False))).is_selector((0.5, 2.5))\n assert _SelectorArguments.validate(Gaussian2D(),\n ((0, True),)).is_selector((0.5,))\n\n # Is not selector\n assert not _SelectorArguments.validate(Gaussian2D(),\n ((0, True), (1, False))).is_selector((0.5, 2.5, 3.5))\n assert not _SelectorArguments.validate(Gaussian2D(),\n ((0, True), (1, False))).is_selector((0.5,))\n assert not _SelectorArguments.validate(Gaussian2D(),\n ((0, True), (1, False))).is_selector(0.5)\n assert not _SelectorArguments.validate(Gaussian2D(),\n ((0, True),)).is_selector((0.5, 2.5))\n assert not _SelectorArguments.validate(Gaussian2D(),\n ((0, True),)).is_selector(2.5)\n\n def test_get_fixed_values(self):\n model = Gaussian2D()\n\n assert _SelectorArguments.validate(model, ((0, True), (1, False))).get_fixed_values(\n model, {0: 11, 1: 7}) == (11, 7)\n assert _SelectorArguments.validate(model, ((0, True), (1, False))).get_fixed_values(\n model, {0: 5, 'y': 47}) == (5, 47)\n assert _SelectorArguments.validate(model, ((0, True), (1, False))).get_fixed_values(\n model, {'x': 2, 'y': 9}) == (2, 9)\n assert _SelectorArguments.validate(model, ((0, True), (1, False))).get_fixed_values(\n model, {'x': 12, 1: 19}) == (12, 19)\n\n def test_is_argument(self):\n model = Gaussian2D()\n\n # Is true\n arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))\n assert arguments.is_argument(model, 0) is True\n assert arguments.is_argument(model, 'x') is True\n assert arguments.is_argument(model, 1) is True\n assert arguments.is_argument(model, 'y') is True\n\n # Is true and false\n arguments = _SelectorArguments.validate(model, ((0, True),))\n assert 
arguments.is_argument(model, 0) is True\n assert arguments.is_argument(model, 'x') is True\n assert arguments.is_argument(model, 1) is False\n assert arguments.is_argument(model, 'y') is False\n\n arguments = _SelectorArguments.validate(model, ((1, False),))\n assert arguments.is_argument(model, 0) is False\n assert arguments.is_argument(model, 'x') is False\n assert arguments.is_argument(model, 1) is True\n assert arguments.is_argument(model, 'y') is True\n\n def test_selector_index(self):\n model = Gaussian2D()\n\n arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))\n assert arguments.selector_index(model, 0) == 0\n assert arguments.selector_index(model, 'x') == 0\n assert arguments.selector_index(model, 1) == 1\n assert arguments.selector_index(model, 'y') == 1\n\n arguments = _SelectorArguments.validate(model, ((1, True), (0, False)))\n assert arguments.selector_index(model, 0) == 1\n assert arguments.selector_index(model, 'x') == 1\n assert arguments.selector_index(model, 1) == 0\n assert arguments.selector_index(model, 'y') == 0\n\n # Error\n arguments = _SelectorArguments.validate(model, ((0, True),))\n with pytest.raises(ValueError) as err:\n arguments.selector_index(model, 'y')\n assert str(err.value) == \"y does not correspond to any selector argument.\"\n\n def test_add_ignore(self):\n model = Gaussian2D()\n\n arguments = _SelectorArguments.validate(model, ((0, True), ))\n assert arguments == ((0, True),)\n assert arguments._kept_ignore == []\n\n new_arguments0 = arguments.add_ignore(model, 1)\n assert new_arguments0 == arguments\n assert new_arguments0._kept_ignore == [1]\n assert arguments._kept_ignore == []\n\n assert arguments._kept_ignore == []\n new_arguments1 = new_arguments0.add_ignore(model, 'y')\n assert new_arguments1 == arguments == new_arguments0\n assert new_arguments0._kept_ignore == [1]\n assert new_arguments1._kept_ignore == [1, 1]\n assert arguments._kept_ignore == []\n\n # Error\n with pytest.raises(ValueError) as 
err:\n arguments.add_ignore(model, 0)\n assert str(err.value) == \"0: is a selector argument and cannot be ignored.\"\n\n def test_reduce(self):\n model = Gaussian2D()\n\n arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))\n\n new_arguments = arguments.reduce(model, 0)\n assert isinstance(new_arguments, _SelectorArguments)\n assert new_arguments == ((1, False),)\n assert new_arguments._kept_ignore == [0]\n assert arguments._kept_ignore == []\n\n new_arguments = arguments.reduce(model, 'x')\n assert isinstance(new_arguments, _SelectorArguments)\n assert new_arguments == ((1, False),)\n assert new_arguments._kept_ignore == [0]\n assert arguments._kept_ignore == []\n\n new_arguments = arguments.reduce(model, 1)\n assert isinstance(new_arguments, _SelectorArguments)\n assert new_arguments == ((0, True),)\n assert new_arguments._kept_ignore == [1]\n assert arguments._kept_ignore == []\n\n new_arguments = arguments.reduce(model, 'y')\n assert isinstance(new_arguments, _SelectorArguments)\n assert new_arguments == ((0, True),)\n assert new_arguments._kept_ignore == [1]\n assert arguments._kept_ignore == []\n\n def test_named_tuple(self):\n model = Gaussian2D()\n\n arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))\n assert arguments.named_tuple(model) == (('x', True), ('y', False))\n\n\nclass TestCompoundBoundingBox:\n def test_create(self):\n model = Gaussian2D()\n selector_args = ((0, True),)\n bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}\n create_selector = mk.MagicMock()\n\n bounding_box = CompoundBoundingBox(bounding_boxes, model,\n selector_args, create_selector, order='F')\n assert (bounding_box._model.parameters == model.parameters).all()\n assert bounding_box._selector_args == selector_args\n for _selector, bbox in bounding_boxes.items():\n assert _selector in bounding_box._bounding_boxes\n assert bounding_box._bounding_boxes[_selector] == bbox\n for _selector, bbox in bounding_box._bounding_boxes.items():\n assert 
_selector in bounding_boxes\n assert bounding_boxes[_selector] == bbox\n assert isinstance(bbox, ModelBoundingBox)\n assert bounding_box._bounding_boxes == bounding_boxes\n assert bounding_box._create_selector == create_selector\n assert bounding_box._order == 'F'\n\n def test_copy(self):\n bounding_box = CompoundBoundingBox.validate(Gaussian2D(),\n {(1,): (-1.5, 1.3), (2,): (-2.7, 2.4)},\n ((0, True),), mk.MagicMock())\n copy = bounding_box.copy()\n\n assert bounding_box == copy\n assert id(bounding_box) != id(copy)\n\n # model is not copied to prevent infinite recursion\n assert bounding_box._model == copy._model\n assert id(bounding_box._model) == id(copy._model)\n\n # Same string values have will have same id\n assert bounding_box._order == copy._order\n assert id(bounding_box._order) == id(copy._order)\n\n assert bounding_box._create_selector == copy._create_selector\n assert id(bounding_box._create_selector) != id(copy._create_selector)\n\n # Check selector_args\n for index, argument in enumerate(bounding_box.selector_args):\n assert argument == copy.selector_args[index]\n assert id(argument) != id(copy.selector_args[index])\n\n # Same integer values have will have same id\n assert argument.index == copy.selector_args[index].index\n assert id(argument.index) == id(copy.selector_args[index].index)\n\n # Same boolean values have will have same id\n assert argument.ignore == copy.selector_args[index].ignore\n assert id(argument.ignore) == id(copy.selector_args[index].ignore)\n assert len(bounding_box.selector_args) == len(copy.selector_args)\n\n # Check bounding_boxes\n for selector, bbox in bounding_box.bounding_boxes.items():\n assert bbox == copy.bounding_boxes[selector]\n assert id(bbox) != id(copy.bounding_boxes[selector])\n\n assert bbox.ignored == copy.bounding_boxes[selector].ignored\n assert id(bbox.ignored) != id(copy.bounding_boxes[selector].ignored)\n\n # model is not copied to prevent infinite recursion\n assert bbox._model == 
copy.bounding_boxes[selector]._model\n assert id(bbox._model) == id(copy.bounding_boxes[selector]._model)\n\n # Same string values have will have same id\n assert bbox._order == copy.bounding_boxes[selector]._order\n assert id(bbox._order) == id(copy.bounding_boxes[selector]._order)\n\n # Check interval objects\n for index, interval in bbox.intervals.items():\n assert interval == copy.bounding_boxes[selector].intervals[index]\n assert id(interval) != id(copy.bounding_boxes[selector].intervals[index])\n\n # Same float values have will have same id\n assert interval.lower == copy.bounding_boxes[selector].intervals[index].lower\n assert id(interval.lower) == id(copy.bounding_boxes[selector].intervals[index].lower) # noqa: E501\n\n # Same float values have will have same id\n assert interval.upper == copy.bounding_boxes[selector].intervals[index].upper\n assert id(interval.upper) == id(copy.bounding_boxes[selector].intervals[index].upper) # noqa: E501\n assert len(bbox.intervals) == len(copy.bounding_boxes[selector].intervals)\n assert bbox.intervals.keys() == copy.bounding_boxes[selector].intervals.keys()\n assert len(bounding_box.bounding_boxes) == len(copy.bounding_boxes)\n assert bounding_box.bounding_boxes.keys() == copy.bounding_boxes.keys()\n\n def test___repr__(self):\n model = Gaussian2D()\n selector_args = ((0, True),)\n bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}\n bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)\n\n assert bounding_box.__repr__() == (\n \"CompoundBoundingBox(\\n\"\n \" bounding_boxes={\\n\"\n \" (1,) = ModelBoundingBox(\\n\"\n \" intervals={\\n\"\n \" y: Interval(lower=-1, upper=1)\\n\"\n \" }\\n\"\n \" ignored=['x']\\n\"\n \" model=Gaussian2D(inputs=('x', 'y'))\\n\"\n \" order='C'\\n\"\n \" )\\n\"\n \" (2,) = ModelBoundingBox(\\n\"\n \" intervals={\\n\"\n \" y: Interval(lower=-2, upper=2)\\n\"\n \" }\\n\"\n \" ignored=['x']\\n\"\n \" model=Gaussian2D(inputs=('x', 'y'))\\n\"\n \" order='C'\\n\"\n \" )\\n\"\n \" 
}\\n\"\n \" selector_args = SelectorArguments(\\n\"\n \" Argument(name='x', ignore=True)\\n\"\n \" )\\n\"\n \")\"\n )\n\n def test_bounding_boxes(self):\n model = Gaussian2D()\n selector_args = ((0, True),)\n bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}\n bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)\n\n assert bounding_box._bounding_boxes == bounding_boxes\n assert bounding_box.bounding_boxes == bounding_boxes\n\n def test_selector_args(self):\n model = Gaussian2D()\n selector_args = ((0, True),)\n bounding_box = CompoundBoundingBox({}, model, selector_args)\n\n # Get\n assert bounding_box._selector_args == selector_args\n assert bounding_box.selector_args == selector_args\n\n # Set\n selector_args = ((1, False),)\n with pytest.warns(RuntimeWarning, match=r\"Overriding selector_args.*\"):\n bounding_box.selector_args = selector_args\n assert bounding_box._selector_args == selector_args\n assert bounding_box.selector_args == selector_args\n\n def test_create_selector(self):\n model = Gaussian2D()\n create_selector = mk.MagicMock()\n bounding_box = CompoundBoundingBox({}, model, ((1,),), create_selector)\n\n assert bounding_box._create_selector == create_selector\n assert bounding_box.create_selector == create_selector\n\n def test__get_selector_key(self):\n bounding_box = CompoundBoundingBox({}, Gaussian2D(), ((1, True),))\n assert len(bounding_box.bounding_boxes) == 0\n\n # Singlar\n assert bounding_box._get_selector_key(5) == (5,)\n assert bounding_box._get_selector_key((5,)) == (5,)\n assert bounding_box._get_selector_key([5]) == (5,)\n assert bounding_box._get_selector_key(np.asanyarray(5)) == (5,)\n assert bounding_box._get_selector_key(np.array([5])) == (5,)\n\n # multiple\n assert bounding_box._get_selector_key((5, 19)) == (5, 19)\n assert bounding_box._get_selector_key([5, 19]) == (5, 19)\n assert bounding_box._get_selector_key(np.array([5, 19])) == (5, 19)\n\n def test___setitem__(self):\n model = Gaussian2D()\n\n # Ignored 
argument\n bounding_box = CompoundBoundingBox({}, model, ((1, True),), order='F')\n assert len(bounding_box.bounding_boxes) == 0\n # Valid\n bounding_box[(15, )] = (-15, 15)\n assert len(bounding_box.bounding_boxes) == 1\n assert (15,) in bounding_box._bounding_boxes\n assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)\n assert bounding_box._bounding_boxes[(15,)] == (-15, 15)\n assert bounding_box._bounding_boxes[(15,)].order == 'F'\n # Invalid key\n assert (7, 13) not in bounding_box._bounding_boxes\n with pytest.raises(ValueError) as err:\n bounding_box[(7, 13)] = (-7, 7)\n assert str(err.value) == \"(7, 13) is not a selector!\"\n assert (7, 13) not in bounding_box._bounding_boxes\n assert len(bounding_box.bounding_boxes) == 1\n # Invalid bounding box\n assert 13 not in bounding_box._bounding_boxes\n with pytest.raises(ValueError):\n bounding_box[(13,)] = ((-13, 13), (-3, 3))\n assert 13 not in bounding_box._bounding_boxes\n assert len(bounding_box.bounding_boxes) == 1\n\n # No ignored argument\n bounding_box = CompoundBoundingBox({}, model, ((1, False),), order='F')\n assert len(bounding_box.bounding_boxes) == 0\n # Valid\n bounding_box[(15, )] = ((-15, 15), (-6, 6))\n assert len(bounding_box.bounding_boxes) == 1\n assert (15,) in bounding_box._bounding_boxes\n assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)\n assert bounding_box._bounding_boxes[(15,)] == ((-15, 15), (-6, 6))\n assert bounding_box._bounding_boxes[(15,)].order == 'F'\n # Invalid key\n assert (14, 11) not in bounding_box._bounding_boxes\n with pytest.raises(ValueError) as err:\n bounding_box[(14, 11)] = ((-7, 7), (-12, 12))\n assert str(err.value) == \"(14, 11) is not a selector!\"\n assert (14, 11) not in bounding_box._bounding_boxes\n assert len(bounding_box.bounding_boxes) == 1\n # Invalid bounding box\n assert 13 not in bounding_box._bounding_boxes\n with pytest.raises(ValueError):\n bounding_box[(13,)] = (-13, 13)\n assert 13 not in 
bounding_box._bounding_boxes\n assert len(bounding_box.bounding_boxes) == 1\n\n def test__validate(self):\n model = Gaussian2D()\n selector_args = ((0, True),)\n\n # Tuple selector_args\n bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}\n bounding_box = CompoundBoundingBox({}, model, selector_args)\n bounding_box._validate(bounding_boxes)\n for _selector, bbox in bounding_boxes.items():\n assert _selector in bounding_box._bounding_boxes\n assert bounding_box._bounding_boxes[_selector] == bbox\n for _selector, bbox in bounding_box._bounding_boxes.items():\n assert _selector in bounding_boxes\n assert bounding_boxes[_selector] == bbox\n assert isinstance(bbox, ModelBoundingBox)\n assert bounding_box._bounding_boxes == bounding_boxes\n\n def test___eq__(self):\n bounding_box_1 = CompoundBoundingBox({(1,): (-1, 1), (2,): (-2, 2)},\n Gaussian2D(), ((0, True),))\n bounding_box_2 = CompoundBoundingBox({(1,): (-1, 1), (2,): (-2, 2)},\n Gaussian2D(), ((0, True),))\n\n # Equal\n assert bounding_box_1 == bounding_box_2\n\n # Not equal to non-compound bounding_box\n assert not bounding_box_1 == mk.MagicMock()\n assert not bounding_box_2 == mk.MagicMock()\n\n # Not equal bounding_boxes\n bounding_box_2[(15,)] = (-15, 15)\n assert not bounding_box_1 == bounding_box_2\n del bounding_box_2._bounding_boxes[(15,)]\n assert bounding_box_1 == bounding_box_2\n\n # Not equal selector_args\n bounding_box_2._selector_args = _SelectorArguments.validate(Gaussian2D(), ((0, False),))\n assert not bounding_box_1 == bounding_box_2\n bounding_box_2._selector_args = _SelectorArguments.validate(Gaussian2D(), ((0, True),))\n assert bounding_box_1 == bounding_box_2\n\n # Not equal create_selector\n bounding_box_2._create_selector = mk.MagicMock()\n assert not bounding_box_1 == bounding_box_2\n\n def test_validate(self):\n model = Gaussian2D()\n selector_args = ((0, True),)\n bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}\n create_selector = mk.MagicMock()\n\n # Fail selector_args\n with 
pytest.raises(ValueError) as err:\n CompoundBoundingBox.validate(model, bounding_boxes)\n assert str(err.value) == (\"Selector arguments must be provided \"\n \"(can be passed as part of bounding_box argument)\")\n\n # Normal validate\n bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args,\n create_selector, order='F')\n assert (bounding_box._model.parameters == model.parameters).all()\n assert bounding_box._selector_args == selector_args\n assert bounding_box._bounding_boxes == bounding_boxes\n assert bounding_box._create_selector == create_selector\n assert bounding_box._order == 'F'\n\n # Re-validate\n new_bounding_box = CompoundBoundingBox.validate(model, bounding_box)\n assert bounding_box == new_bounding_box\n assert new_bounding_box._order == 'F'\n\n # Default order\n bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args,\n create_selector)\n assert (bounding_box._model.parameters == model.parameters).all()\n assert bounding_box._selector_args == selector_args\n assert bounding_box._bounding_boxes == bounding_boxes\n assert bounding_box._create_selector == create_selector\n assert bounding_box._order == 'C'\n\n def test___contains__(self):\n model = Gaussian2D()\n selector_args = ((0, True),)\n bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}\n bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)\n\n assert (1,) in bounding_box\n assert (2,) in bounding_box\n\n assert (3,) not in bounding_box\n assert 1 not in bounding_box\n assert 2 not in bounding_box\n\n def test__create_bounding_box(self):\n model = Gaussian2D()\n create_selector = mk.MagicMock()\n bounding_box = CompoundBoundingBox({}, model, ((1, False),),\n create_selector)\n\n # Create is successful\n create_selector.return_value = ((-15, 15), (-23, 23))\n assert len(bounding_box._bounding_boxes) == 0\n bbox = bounding_box._create_bounding_box((7,))\n assert isinstance(bbox, ModelBoundingBox)\n assert bbox == ((-15, 15), (-23, 
23))\n assert len(bounding_box._bounding_boxes) == 1\n assert (7,) in bounding_box\n assert isinstance(bounding_box[(7,)], ModelBoundingBox)\n assert bounding_box[(7,)] == bbox\n\n # Create is unsuccessful\n create_selector.return_value = (-42, 42)\n with pytest.raises(ValueError):\n bounding_box._create_bounding_box((27,))\n\n def test___getitem__(self):\n model = Gaussian2D()\n selector_args = ((0, True),)\n bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}\n bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)\n\n # already exists\n assert isinstance(bounding_box[1], ModelBoundingBox)\n assert bounding_box[1] == (-1, 1)\n assert isinstance(bounding_box[(2,)], ModelBoundingBox)\n assert bounding_box[2] == (-2, 2)\n assert isinstance(bounding_box[(1,)], ModelBoundingBox)\n assert bounding_box[(1,)] == (-1, 1)\n assert isinstance(bounding_box[(2,)], ModelBoundingBox)\n assert bounding_box[(2,)] == (-2, 2)\n\n # no selector\n with pytest.raises(RuntimeError) as err:\n bounding_box[(3,)]\n assert str(err.value) == \"No bounding box is defined for selector: (3,).\"\n\n # Create a selector\n bounding_box._create_selector = mk.MagicMock()\n with mk.patch.object(CompoundBoundingBox, '_create_bounding_box',\n autospec=True) as mkCreate:\n assert bounding_box[(3,)] == mkCreate.return_value\n assert mkCreate.call_args_list == [mk.call(bounding_box, (3,))]\n\n def test__select_bounding_box(self):\n model = Gaussian2D()\n selector_args = ((0, True),)\n bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}\n bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)\n\n inputs = [mk.MagicMock() for _ in range(3)]\n with mk.patch.object(_SelectorArguments, 'get_selector',\n autospec=True) as mkSelector:\n with mk.patch.object(CompoundBoundingBox, '__getitem__',\n autospec=True) as mkGet:\n assert bounding_box._select_bounding_box(inputs) == mkGet.return_value\n assert mkGet.call_args_list == [mk.call(bounding_box, mkSelector.return_value)]\n assert 
mkSelector.call_args_list == [mk.call(bounding_box.selector_args, *inputs)]\n\n def test_prepare_inputs(self):\n model = Gaussian2D()\n selector_args = ((0, True),)\n bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}\n bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)\n\n input_shape = mk.MagicMock()\n with mk.patch.object(ModelBoundingBox, 'prepare_inputs',\n autospec=True) as mkPrepare:\n assert bounding_box.prepare_inputs(input_shape, [1, 2, 3]) == mkPrepare.return_value\n assert mkPrepare.call_args_list == [mk.call(bounding_box[(1,)], input_shape, [1, 2, 3])]\n mkPrepare.reset_mock()\n assert bounding_box.prepare_inputs(input_shape, [2, 2, 3]) == mkPrepare.return_value\n assert mkPrepare.call_args_list == [mk.call(bounding_box[(2,)], input_shape, [2, 2, 3])]\n mkPrepare.reset_mock()\n\n def test__matching_bounding_boxes(self):\n # Single selector index\n selector_args = ((0, False),)\n bounding_boxes = {\n (1,): ((-1, 1), (-2, 2)),\n (2,): ((-2, 2), (-3, 3)),\n (3,): ((-3, 3), (-4, 4))\n }\n bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)\n\n for value in [1, 2, 3]:\n matching = bounding_box._matching_bounding_boxes('x', value)\n assert isinstance(matching, dict)\n assert () in matching\n bbox = matching[()]\n assert isinstance(bbox, ModelBoundingBox)\n assert (bbox._model.parameters == Gaussian2D().parameters).all()\n assert 'x' in bbox\n assert 'x' in bbox.ignored_inputs\n assert 'y' in bbox\n assert bbox['y'] == (-value, value)\n assert len(bbox.intervals) == 1\n assert bbox.ignored == [0]\n\n # Multiple selector index\n selector_args = ((0, False), (1, False))\n bounding_boxes = {\n (1, 3): ((-1, 1), (-2, 2)),\n (2, 2): ((-2, 2), (-3, 3)),\n (3, 1): ((-3, 3), (-4, 4))\n }\n bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)\n\n for value in [1, 2, 3]:\n matching = bounding_box._matching_bounding_boxes('x', value)\n assert isinstance(matching, dict)\n assert (4 - value,) in 
matching\n bbox = matching[(4 - value,)]\n assert isinstance(bbox, ModelBoundingBox)\n assert (bbox._model.parameters == Gaussian2D().parameters).all()\n assert 'x' in bbox\n assert 'x' in bbox.ignored_inputs\n assert 'y' in bbox\n assert bbox['y'] == (-value, value)\n assert len(bbox.intervals) == 1\n assert bbox.ignored == [0]\n\n matching = bounding_box._matching_bounding_boxes('y', value)\n assert isinstance(matching, dict)\n assert (4 - value,) in matching\n bbox = matching[(4 - value,)]\n assert isinstance(bbox, ModelBoundingBox)\n assert (bbox._model.parameters == Gaussian2D().parameters).all()\n assert 'y' in bbox\n assert 'y' in bbox.ignored_inputs\n assert 'x' in bbox\n assert bbox['x'] == (-(5 - value), (5 - value))\n assert len(bbox.intervals) == 1\n assert bbox.ignored == [1]\n\n # Real fix input of slicing input\n model = Shift(1) & Scale(2) & Identity(1)\n model.inputs = ('x', 'y', 'slit_id')\n bounding_boxes = {\n (0,): ((-0.5, 1047.5), (-0.5, 2047.5)),\n (1,): ((-0.5, 3047.5), (-0.5, 4047.5))\n }\n bounding_box = CompoundBoundingBox.validate(model, bounding_boxes,\n selector_args=[('slit_id', True)], order='F')\n\n matching = bounding_box._matching_bounding_boxes('slit_id', 0)\n assert isinstance(matching, dict)\n assert () in matching\n bbox = matching[()]\n assert isinstance(bbox, ModelBoundingBox)\n assert (bbox._model.parameters == model.parameters).all()\n assert bbox.ignored_inputs == ['slit_id']\n assert bbox.named_intervals == {'x': (-0.5, 1047.5),\n 'y': (-0.5, 2047.5)}\n assert bbox.order == 'F'\n\n matching = bounding_box._matching_bounding_boxes('slit_id', 1)\n assert isinstance(matching, dict)\n assert () in matching\n bbox = matching[()]\n assert isinstance(bbox, ModelBoundingBox)\n assert (bbox._model.parameters == model.parameters).all()\n assert bbox.ignored_inputs == ['slit_id']\n assert bbox.named_intervals == {'x': (-0.5, 3047.5),\n 'y': (-0.5, 4047.5)}\n assert bbox.order == 'F'\n\n # Errors\n with pytest.raises(ValueError) as 
err:\n bounding_box._matching_bounding_boxes('slit_id', 2)\n assert str(err.value) == (\"Attempting to fix input slit_id, but \"\n \"there are no bounding boxes for argument value 2.\")\n\n def test__fix_input_selector_arg(self):\n # Single selector index\n selector_args = ((0, False),)\n bounding_boxes = {\n (1,): ((-1, 1), (-2, 2)),\n (2,): ((-2, 2), (-3, 3)),\n (3,): ((-3, 3), (-4, 4))\n }\n bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)\n\n for value in [1, 2, 3]:\n bbox = bounding_box._fix_input_selector_arg('x', value)\n assert isinstance(bbox, ModelBoundingBox)\n assert (bbox._model.parameters == Gaussian2D().parameters).all()\n assert 'x' in bbox\n assert 'x' in bbox.ignored_inputs\n assert 'y' in bbox\n assert bbox['y'] == (-value, value)\n assert len(bbox.intervals) == 1\n assert bbox.ignored == [0]\n\n # Multiple selector index\n selector_args = ((0, False), (1, False))\n bounding_boxes = {\n (1, 3): ((-1, 1), (-2, 2)),\n (2, 2): ((-2, 2), (-3, 3)),\n (3, 1): ((-3, 3), (-4, 4))\n }\n bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)\n\n for value in [1, 2, 3]:\n bbox = bounding_box._fix_input_selector_arg('x', value)\n assert isinstance(bbox, CompoundBoundingBox)\n assert (bbox._model.parameters == Gaussian2D().parameters).all()\n assert bbox.selector_args == ((1, False),)\n assert (4 - value,) in bbox\n bbox_selector = bbox[(4 - value,)]\n assert isinstance(bbox_selector, ModelBoundingBox)\n assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()\n assert 'x' in bbox_selector\n assert 'x' in bbox_selector.ignored_inputs\n assert 'y' in bbox_selector\n assert bbox_selector['y'] == (-value, value)\n assert len(bbox_selector.intervals) == 1\n assert bbox_selector.ignored == [0]\n\n bbox = bounding_box._fix_input_selector_arg('y', value)\n assert isinstance(bbox, CompoundBoundingBox)\n assert (bbox._model.parameters == Gaussian2D().parameters).all()\n assert bbox.selector_args 
== ((0, False),)\n assert (4 - value,) in bbox\n bbox_selector = bbox[(4 - value,)]\n assert isinstance(bbox_selector, ModelBoundingBox)\n assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()\n assert 'y' in bbox_selector\n assert 'y' in bbox_selector.ignored_inputs\n assert 'x' in bbox_selector\n assert bbox_selector['x'] == (-(5 - value), (5 - value))\n assert len(bbox_selector.intervals) == 1\n assert bbox_selector.ignored == [1]\n\n # Real fix input of slicing input\n model = Shift(1) & Scale(2) & Identity(1)\n model.inputs = ('x', 'y', 'slit_id')\n bounding_boxes = {\n (0,): ((-0.5, 1047.5), (-0.5, 2047.5)),\n (1,): ((-0.5, 3047.5), (-0.5, 4047.5))\n }\n bounding_box = CompoundBoundingBox.validate(model, bounding_boxes,\n selector_args=[('slit_id', True)], order='F')\n\n bbox = bounding_box._fix_input_selector_arg('slit_id', 0)\n assert isinstance(bbox, ModelBoundingBox)\n assert (bbox._model.parameters == model.parameters).all()\n assert bbox.ignored_inputs == ['slit_id']\n assert bbox.named_intervals == {'x': (-0.5, 1047.5),\n 'y': (-0.5, 2047.5)}\n assert bbox.order == 'F'\n\n bbox = bounding_box._fix_input_selector_arg('slit_id', 1)\n assert isinstance(bbox, ModelBoundingBox)\n assert (bbox._model.parameters == model.parameters).all()\n assert bbox.ignored_inputs == ['slit_id']\n assert bbox.named_intervals == {'x': (-0.5, 3047.5),\n 'y': (-0.5, 4047.5)}\n assert bbox.order == 'F'\n\n def test__fix_input_bbox_arg(self):\n model = Shift(1) & Scale(2) & Identity(1)\n model.inputs = ('x', 'y', 'slit_id')\n bounding_boxes = {\n (0,): ((-0.5, 1047.5), (-0.5, 2047.5)),\n (1,): ((-0.5, 3047.5), (-0.5, 4047.5))\n }\n bounding_box = CompoundBoundingBox.validate(model, bounding_boxes,\n selector_args=[('slit_id', True)], order='F')\n\n bbox = bounding_box._fix_input_bbox_arg('x', 5)\n assert isinstance(bbox, CompoundBoundingBox)\n assert (bbox._model.parameters == model.parameters).all()\n assert bbox.selector_args == ((2, True),)\n assert 
bbox.selector_args._kept_ignore == [0]\n assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)\n assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)\n assert len(bbox._bounding_boxes) == 2\n\n bbox = bounding_box._fix_input_bbox_arg('y', 5)\n assert isinstance(bbox, CompoundBoundingBox)\n assert (bbox._model.parameters == model.parameters).all()\n assert bbox.selector_args == ((2, True),)\n assert bbox.selector_args._kept_ignore == [1]\n assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)\n assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)\n assert len(bbox._bounding_boxes) == 2\n\n def test_fix_inputs(self):\n model = Shift(1) & Scale(2) & Identity(1)\n model.inputs = ('x', 'y', 'slit_id')\n bounding_boxes = {\n (0,): ((-0.5, 1047.5), (-0.5, 2047.5)),\n (1,): ((-0.5, 3047.5), (-0.5, 4047.5))\n }\n bounding_box = CompoundBoundingBox.validate(model, bounding_boxes,\n selector_args=[('slit_id', True)], order='F')\n model.bounding_box = bounding_box\n\n # Fix selector argument\n new_model = fix_inputs(model, {'slit_id': 0})\n bbox = new_model.bounding_box\n assert isinstance(bbox, ModelBoundingBox)\n assert (bbox._model.parameters == new_model.parameters).all()\n assert bbox.ignored_inputs == []\n assert bbox.named_intervals == {'x': (-0.5, 1047.5),\n 'y': (-0.5, 2047.5)}\n assert bbox.order == 'F'\n\n # Fix a bounding_box field\n new_model = fix_inputs(model, {'x': 5})\n bbox = new_model.bounding_box\n assert isinstance(bbox, CompoundBoundingBox)\n assert (bbox._model.parameters == model.parameters).all()\n assert bbox.selector_args == ((1, True),)\n assert bbox.selector_args._kept_ignore == []\n assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)\n assert bbox._bounding_boxes[(0,)].order == 'F'\n assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)\n assert bbox._bounding_boxes[(1,)].order == 'F'\n assert len(bbox._bounding_boxes) == 2\n new_model = fix_inputs(model, {'y': 5})\n bbox = new_model.bounding_box\n assert isinstance(bbox, CompoundBoundingBox)\n assert 
(bbox._model.parameters == model.parameters).all()\n assert bbox.selector_args == ((1, True),)\n assert bbox.selector_args._kept_ignore == []\n assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)\n assert bbox._bounding_boxes[(0,)].order == 'F'\n assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)\n assert bbox._bounding_boxes[(1,)].order == 'F'\n assert len(bbox._bounding_boxes) == 2\n\n # Fix selector argument and a bounding_box field\n new_model = fix_inputs(model, {'slit_id': 0, 'x': 5})\n bbox = new_model.bounding_box\n assert isinstance(bbox, ModelBoundingBox)\n assert (bbox._model.parameters == new_model.parameters).all()\n assert bbox.ignored_inputs == []\n assert bbox.named_intervals == {'y': (-0.5, 2047.5)}\n assert bbox.order == 'F'\n new_model = fix_inputs(model, {'y': 5, 'slit_id': 1})\n bbox = new_model.bounding_box\n assert isinstance(bbox, ModelBoundingBox)\n assert (bbox._model.parameters == new_model.parameters).all()\n assert bbox.ignored_inputs == []\n assert bbox.named_intervals == {'x': (-0.5, 3047.5)}\n assert bbox.order == 'F'\n\n # Fix two bounding_box fields\n new_model = fix_inputs(model, {'x': 5, 'y': 7})\n bbox = new_model.bounding_box\n assert isinstance(bbox, CompoundBoundingBox)\n assert bbox.selector_args == ((0, True),)\n assert bbox.selector_args._kept_ignore == []\n assert bbox._bounding_boxes[(0,)] == (-np.inf, np.inf)\n assert bbox._bounding_boxes[(0,)].order == 'F'\n assert bbox._bounding_boxes[(1,)] == (-np.inf, np.inf)\n assert bbox._bounding_boxes[(1,)].order == 'F'\n assert len(bbox._bounding_boxes) == 2\n\n def test_complex_compound_bounding_box(self):\n model = Identity(4)\n bounding_boxes = {\n (2.5, 1.3): ((-1, 1), (-3, 3)),\n (2.5, 2.71): ((-3, 3), (-1, 1))\n }\n selector_args = (('x0', True), ('x1', True))\n\n bbox = CompoundBoundingBox.validate(model, bounding_boxes, selector_args)\n assert bbox[(2.5, 1.3)] == ModelBoundingBox(((-1, 1), (-3, 3)),\n model, ignored=['x0', 'x1'])\n assert bbox[(2.5, 2.71)] == 
ModelBoundingBox(((-3, 3), (-1, 1)),\n model, ignored=['x0', 'x1'])\n"}}},{"rowIdx":1362,"cells":{"hash":{"kind":"string","value":"faa6758cad40b2f0f052dee47c2dab95990c0fdb8b54726dbd9ff206345a9b73"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"Tests for spline models and fitters\"\"\"\nimport unittest.mock as mk\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose\n\nfrom astropy.modeling.core import FittableModel, ModelDefinitionError\nfrom astropy.modeling.fitting import (\n SplineExactKnotsFitter, SplineInterpolateFitter, SplineSmoothingFitter, SplineSplrepFitter)\nfrom astropy.modeling.parameters import Parameter\nfrom astropy.modeling.spline import Spline1D, _Spline, _SplineFitter\nfrom astropy.utils.compat.optional_deps import HAS_SCIPY # noqa: F401\n# pylint: disable=invalid-name\nfrom astropy.utils.exceptions import AstropyUserWarning\n\nnpts = 50\nnknots = 10\nnp.random.seed(42)\ntest_w = np.random.rand(npts)\ntest_t = [-1, 0, 1]\nnoise = np.random.randn(npts)\n\ndegree_tests = [1, 2, 3, 4, 5]\nwieght_tests = [None, test_w]\nsmoothing_tests = [None, 0.01]\n\n\nclass TestSpline:\n def setup_class(self):\n self.num_opt = 3\n self.optional_inputs = {f'test{i}': mk.MagicMock() for i in range(self.num_opt)}\n self.extra_kwargs = {f'new{i}': mk.MagicMock() for i in range(self.num_opt)}\n\n class Spline(_Spline):\n optional_inputs = {'test': 'test'}\n\n def _init_parameters(self):\n super()._init_parameters()\n\n def _init_data(self, knots, coeffs, bounds=None):\n super()._init_data(knots, coeffs, bounds=bounds)\n\n self.Spline = Spline\n\n def test___init__(self):\n # empty spline\n spl = self.Spline()\n assert spl._t is None\n assert spl._c is None\n assert spl._user_knots is False\n assert spl._degree is None\n assert spl._test is None\n\n assert not hasattr(spl, 'degree')\n\n # Call _init_spline\n with mk.patch.object(_Spline, '_init_spline',\n autospec=True) as 
mkInit:\n # No call (knots=None)\n spl = self.Spline()\n assert mkInit.call_args_list == []\n\n knots = mk.MagicMock()\n coeffs = mk.MagicMock()\n bounds = mk.MagicMock()\n spl = self.Spline(knots=knots, coeffs=coeffs, bounds=bounds)\n assert mkInit.call_args_list == [mk.call(spl, knots, coeffs, bounds)]\n\n assert spl._t is None\n assert spl._c is None\n assert spl._user_knots is False\n assert spl._degree is None\n assert spl._test is None\n\n # Coeffs but no knots\n with pytest.raises(ValueError) as err:\n self.Spline(coeffs=mk.MagicMock())\n assert str(err.value) == \"If one passes a coeffs vector one needs to also pass knots!\"\n\n def test_param_names(self):\n # no parameters\n spl = self.Spline()\n assert spl.param_names == ()\n\n knot_names = tuple(mk.MagicMock() for _ in range(3))\n spl._knot_names = knot_names\n assert spl.param_names == knot_names\n\n coeff_names = tuple(mk.MagicMock() for _ in range(3))\n spl._coeff_names = coeff_names\n assert spl.param_names == knot_names + coeff_names\n\n def test__optional_arg(self):\n\n spl = self.Spline()\n assert spl._optional_arg('test') == '_test'\n\n def test__create_optional_inputs(self):\n class Spline(self.Spline):\n optional_inputs = self.optional_inputs\n\n def __init__(self):\n self._create_optional_inputs()\n\n spl = Spline()\n for arg in self.optional_inputs:\n attribute = spl._optional_arg(arg)\n assert hasattr(spl, attribute)\n assert getattr(spl, attribute) is None\n\n with pytest.raises(ValueError,\n match=r\"Optional argument .* already exists in this class!\"):\n spl._create_optional_inputs()\n\n def test__intercept_optional_inputs(self):\n class Spline(self.Spline):\n optional_inputs = self.optional_inputs\n\n def __init__(self):\n self._create_optional_inputs()\n\n spl = Spline()\n new_kwargs = spl._intercept_optional_inputs(**self.extra_kwargs)\n for arg, value in self.optional_inputs.items():\n attribute = spl._optional_arg(arg)\n assert getattr(spl, attribute) is None\n assert new_kwargs == 
self.extra_kwargs\n\n kwargs = self.extra_kwargs.copy()\n for arg in self.optional_inputs:\n kwargs[arg] = mk.MagicMock()\n new_kwargs = spl._intercept_optional_inputs(**kwargs)\n for arg, value in self.optional_inputs.items():\n attribute = spl._optional_arg(arg)\n assert getattr(spl, attribute) is not None\n assert getattr(spl, attribute) == kwargs[arg]\n assert getattr(spl, attribute) != value\n assert arg not in new_kwargs\n assert new_kwargs == self.extra_kwargs\n assert kwargs != self.extra_kwargs\n\n with pytest.raises(RuntimeError,\n match=r\".* has already been set, something has gone wrong!\"):\n spl._intercept_optional_inputs(**kwargs)\n\n def test_evaluate(self):\n class Spline(self.Spline):\n optional_inputs = self.optional_inputs\n\n spl = Spline()\n\n # No options passed in and No options set\n new_kwargs = spl.evaluate(**self.extra_kwargs)\n for arg, value in self.optional_inputs.items():\n assert new_kwargs[arg] == value\n for arg, value in self.extra_kwargs.items():\n assert new_kwargs[arg] == value\n assert len(new_kwargs) == (len(self.optional_inputs) + len(self.extra_kwargs))\n\n # No options passed in and Options set\n kwargs = self.extra_kwargs.copy()\n for arg in self.optional_inputs:\n kwargs[arg] = mk.MagicMock()\n spl._intercept_optional_inputs(**kwargs)\n new_kwargs = spl.evaluate(**self.extra_kwargs)\n assert new_kwargs == kwargs\n for arg in self.optional_inputs:\n attribute = spl._optional_arg(arg)\n assert getattr(spl, attribute) is None\n\n # Options passed in\n set_kwargs = self.extra_kwargs.copy()\n for arg in self.optional_inputs:\n kwargs[arg] = mk.MagicMock()\n spl._intercept_optional_inputs(**set_kwargs)\n kwargs = self.extra_kwargs.copy()\n for arg in self.optional_inputs:\n kwargs[arg] = mk.MagicMock()\n assert set_kwargs != kwargs\n new_kwargs = spl.evaluate(**kwargs)\n assert new_kwargs == kwargs\n\n def test___call__(self):\n spl = self.Spline()\n\n args = tuple(mk.MagicMock() for _ in range(3))\n kwargs = 
{f\"test{idx}\": mk.MagicMock() for idx in range(3)}\n new_kwargs = {f\"new_test{idx}\": mk.MagicMock() for idx in range(3)}\n with mk.patch.object(_Spline, \"_intercept_optional_inputs\",\n autospec=True, return_value=new_kwargs) as mkIntercept:\n with mk.patch.object(FittableModel, \"__call__\",\n autospec=True) as mkCall:\n assert mkCall.return_value == spl(*args, **kwargs)\n assert mkCall.call_args_list == [mk.call(spl, *args, **new_kwargs)]\n assert mkIntercept.call_args_list == [mk.call(spl, **kwargs)]\n\n def test__create_parameter(self):\n np.random.seed(37)\n base_vec = np.random.random(20)\n test = base_vec.copy()\n fixed_test = base_vec.copy()\n\n class Spline(self.Spline):\n @property\n def test(self):\n return test\n\n @property\n def fixed_test(self):\n return fixed_test\n\n spl = Spline()\n assert (spl.test == test).all()\n assert (spl.fixed_test == fixed_test).all()\n\n for index in range(20):\n name = f\"test_name{index}\"\n spl._create_parameter(name, index, 'test')\n assert hasattr(spl, name)\n param = getattr(spl, name)\n assert isinstance(param, Parameter)\n assert param.model == spl\n assert param.fixed is False\n assert param.value == test[index] == spl.test[index] == base_vec[index]\n new_set = np.random.random()\n param.value = new_set\n assert spl.test[index] == new_set\n assert spl.test[index] != base_vec[index]\n new_get = np.random.random()\n spl.test[index] = new_get\n assert param.value == new_get\n assert param.value != new_set\n\n for index in range(20):\n name = f\"fixed_test_name{index}\"\n spl._create_parameter(name, index, 'fixed_test', True)\n assert hasattr(spl, name)\n param = getattr(spl, name)\n assert isinstance(param, Parameter)\n assert param.model == spl\n assert param.fixed is True\n assert param.value == fixed_test[index] == spl.fixed_test[index] == base_vec[index]\n new_set = np.random.random()\n param.value = new_set\n assert spl.fixed_test[index] == new_set\n assert spl.fixed_test[index] != base_vec[index]\n 
new_get = np.random.random()\n spl.fixed_test[index] = new_get\n assert param.value == new_get\n assert param.value != new_set\n\n def test__create_parameters(self):\n np.random.seed(37)\n test = np.random.random(20)\n\n class Spline(self.Spline):\n @property\n def test(self):\n return test\n\n spl = Spline()\n\n fixed = mk.MagicMock()\n with mk.patch.object(_Spline, '_create_parameter',\n autospec=True) as mkCreate:\n params = spl._create_parameters(\"test_param\", \"test\", fixed)\n assert params == tuple(f\"test_param{idx}\" for idx in range(20))\n assert mkCreate.call_args_list == [\n mk.call(spl, f\"test_param{idx}\", idx, 'test', fixed) for idx in range(20)\n ]\n\n def test__init_parameters(self):\n spl = self.Spline()\n\n with pytest.raises(NotImplementedError) as err:\n spl._init_parameters()\n assert str(err.value) == \"This needs to be implemented\"\n\n def test__init_data(self):\n spl = self.Spline()\n\n with pytest.raises(NotImplementedError) as err:\n spl._init_data(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())\n assert str(err.value) == \"This needs to be implemented\"\n\n with pytest.raises(NotImplementedError) as err:\n spl._init_data(mk.MagicMock(), mk.MagicMock())\n assert str(err.value) == \"This needs to be implemented\"\n\n def test__init_spline(self):\n spl = self.Spline()\n\n knots = mk.MagicMock()\n coeffs = mk.MagicMock()\n bounds = mk.MagicMock()\n with mk.patch.object(_Spline, \"_init_parameters\",\n autospec=True) as mkParameters:\n with mk.patch.object(_Spline, \"_init_data\",\n autospec=True) as mkData:\n main = mk.MagicMock()\n main.attach_mock(mkParameters, 'parameters')\n main.attach_mock(mkData, 'data')\n\n spl._init_spline(knots, coeffs, bounds)\n assert main.mock_calls == [\n mk.call.data(spl, knots, coeffs, bounds=bounds),\n mk.call.parameters(spl)\n ]\n\n def test__init_tck(self):\n spl = self.Spline()\n assert spl._c is None\n assert spl._t is None\n assert spl._degree is None\n\n spl = self.Spline(degree=4)\n assert spl._c 
is None\n assert spl._t is None\n assert spl._degree == 4\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\nclass TestSpline1D:\n def setup_class(self):\n def func(x, noise=0):\n return np.exp(-x**2) + 0.1*noise\n\n self.x = np.linspace(-3, 3, npts)\n self.y = func(self.x, noise)\n self.truth = func(self.x)\n\n arg_sort = np.argsort(self.x)\n np.random.shuffle(arg_sort)\n\n self.x_s = self.x[arg_sort]\n self.y_s = func(self.x_s, noise[arg_sort])\n\n self.npts_out = 1000\n self.xs = np.linspace(-3, 3, self.npts_out)\n\n self.t = np.linspace(-3, 3, nknots)[1:-1]\n\n def check_parameter(self, spl, base_name, name, index, value, fixed):\n assert base_name in name\n assert index == int(name.split(base_name)[-1])\n knot_name = f\"{base_name}{index}\"\n assert knot_name == name\n assert hasattr(spl, name)\n param = getattr(spl, name)\n assert isinstance(param, Parameter)\n assert param.name == name\n assert param.value == value(index)\n assert param.model == spl\n assert param.fixed is fixed\n\n def check_parameters(self, spl, params, base_name, value, fixed):\n for idx, name in enumerate(params):\n self.check_parameter(spl, base_name, name, idx, value, fixed)\n\n def update_parameters(self, spl, knots, value):\n for name in knots:\n param = getattr(spl, name)\n param.value = value\n assert param.value == value\n\n def test___init__with_no_knot_information(self):\n spl = Spline1D()\n assert spl._degree == 3\n assert spl._user_knots is False\n assert spl._t is None\n assert spl._c is None\n assert spl._nu is None\n\n # Check no parameters created\n assert len(spl._knot_names) == 0\n assert len(spl._coeff_names) == 0\n\n def test___init__with_number_of_knots(self):\n spl = Spline1D(knots=10)\n\n # Check baseline data\n assert spl._degree == 3\n assert spl._user_knots is False\n assert spl._nu is None\n\n # Check vector data\n assert len(spl._t) == 18\n t = np.zeros(18)\n t[-4:] = 1\n assert (spl._t == t).all()\n assert len(spl._c) == 18\n assert (spl._c == np.zeros(18)).all()\n\n 
# Check all parameter names created:\n assert len(spl._knot_names) == 18\n assert len(spl._coeff_names) == 18\n\n # Check knot values:\n def value0(idx):\n if idx < 18 - 4:\n return 0\n else:\n return 1\n self.check_parameters(spl, spl._knot_names, \"knot\", value0, True)\n\n # Check coeff values:\n def value1(idx):\n return 0\n self.check_parameters(spl, spl._coeff_names, \"coeff\", value1, False)\n\n def test___init__with_full_custom_knots(self):\n t = 17*np.arange(20) - 32\n spl = Spline1D(knots=t)\n\n # Check baseline data\n assert spl._degree == 3\n assert spl._user_knots is True\n assert spl._nu is None\n\n # Check vector data\n assert (spl._t == t).all()\n assert len(spl._c) == 20\n assert (spl._c == np.zeros(20)).all()\n\n # Check all parameter names created\n assert len(spl._knot_names) == 20\n assert len(spl._coeff_names) == 20\n\n # Check knot values:\n def value0(idx):\n return t[idx]\n self.check_parameters(spl, spl._knot_names, \"knot\", value0, True)\n\n # Check coeff values\n def value1(idx):\n return 0\n self.check_parameters(spl, spl._coeff_names, \"coeff\", value1, False)\n\n def test___init__with_interior_custom_knots(self):\n t = np.arange(1, 20)\n spl = Spline1D(knots=t, bounds=[0, 20])\n # Check baseline data\n assert spl._degree == 3\n assert spl._user_knots is True\n assert spl._nu is None\n\n # Check vector data\n assert len(spl._t) == 27\n assert (spl._t[4:-4] == t).all()\n assert (spl._t[:4] == 0).all()\n assert (spl._t[-4:] == 20).all()\n\n assert len(spl._c) == 27\n assert (spl._c == np.zeros(27)).all()\n\n # Check knot values:\n def value0(idx):\n if idx < 4:\n return 0\n elif idx >= 19 + 4:\n return 20\n else:\n return t[idx-4]\n self.check_parameters(spl, spl._knot_names, \"knot\", value0, True)\n\n # Check coeff values\n def value1(idx):\n return 0\n self.check_parameters(spl, spl._coeff_names, \"coeff\", value1, False)\n\n def test___init__with_user_knots_and_coefficients(self):\n t = 17*np.arange(20) - 32\n c = np.linspace(-1, 1, 
20)\n spl = Spline1D(knots=t, coeffs=c)\n\n # Check baseline data\n assert spl._degree == 3\n assert spl._user_knots is True\n assert spl._nu is None\n\n # Check vector data\n assert (spl._t == t).all()\n assert len(spl._c) == 20\n assert (spl._c == c).all()\n\n # Check all parameter names created\n assert len(spl._knot_names) == 20\n assert len(spl._coeff_names) == 20\n\n # Check knot values:\n def value0(idx):\n return t[idx]\n self.check_parameters(spl, spl._knot_names, \"knot\", value0, True)\n\n # Check coeff values\n def value1(idx):\n return c[idx]\n self.check_parameters(spl, spl._coeff_names, \"coeff\", value1, False)\n\n def test___init__errors(self):\n # Bad knot type\n knots = 3.5\n with pytest.raises(ValueError) as err:\n Spline1D(knots=knots)\n assert str(err.value) == f\"Knots: {knots} must be iterable or value\"\n\n # Not enough knots\n for idx in range(8):\n with pytest.raises(ValueError) as err:\n Spline1D(knots=np.arange(idx))\n assert str(err.value) == \"Must have at least 8 knots.\"\n\n # Bad scipy spline\n t = np.arange(20)[::-1]\n with pytest.raises(ValueError):\n Spline1D(knots=t)\n\n def test_parameter_array_link(self):\n spl = Spline1D(10)\n\n # Check knot base values\n def value0(idx):\n if idx < 18 - 4:\n return 0\n else:\n return 1\n self.check_parameters(spl, spl._knot_names, \"knot\", value0, True)\n\n # Check knot vector -> knot parameter link\n t = np.arange(18)\n spl._t = t.copy()\n\n def value1(idx):\n return t[idx]\n self.check_parameters(spl, spl._knot_names, \"knot\", value1, True)\n\n # Check knot parameter -> knot vector link\n self.update_parameters(spl, spl._knot_names, 3)\n assert (spl._t[:] == 3).all()\n\n # Check coeff base values\n def value2(idx):\n return 0\n self.check_parameters(spl, spl._coeff_names, \"coeff\", value2, False)\n\n # Check coeff vector -> coeff parameter link\n c = 5 * np.arange(18) + 18\n spl._c = c.copy()\n\n def value3(idx):\n return c[idx]\n self.check_parameters(spl, spl._coeff_names, \"coeff\", 
value3, False)\n\n # Check coeff parameter -> coeff vector link\n self.update_parameters(spl, spl._coeff_names, 4)\n assert (spl._c[:] == 4).all()\n\n def test_two_splines(self):\n spl0 = Spline1D(knots=10)\n spl1 = Spline1D(knots=15, degree=2)\n\n assert spl0._degree == 3\n assert len(spl0._t) == 18\n t = np.zeros(18)\n t[-4:] = 1\n assert (spl0._t == t).all()\n assert len(spl0._c) == 18\n assert (spl0._c == np.zeros(18)).all()\n assert spl1._degree == 2\n assert len(spl1._t) == 21\n t = np.zeros(21)\n t[-3:] = 1\n assert (spl1._t == t).all()\n assert len(spl1._c) == 21\n assert (spl1._c == np.zeros(21)).all()\n\n # Check all knot names created\n assert len(spl0._knot_names) == 18\n assert len(spl1._knot_names) == 21\n\n # Check knot base values\n def value0(idx):\n if idx < 18 - 4:\n return 0\n else:\n return 1\n self.check_parameters(spl0, spl0._knot_names, \"knot\", value0, True)\n\n def value1(idx):\n if idx < 21 - 3:\n return 0\n else:\n return 1\n self.check_parameters(spl1, spl1._knot_names, \"knot\", value1, True)\n\n # Check knot vector -> knot parameter link\n t0 = 7 * np.arange(18) + 27\n t1 = 11 * np.arange(21) + 19\n spl0._t[:] = t0.copy()\n spl1._t[:] = t1.copy()\n\n def value2(idx):\n return t0[idx]\n self.check_parameters(spl0, spl0._knot_names, \"knot\", value2, True)\n\n def value3(idx):\n return t1[idx]\n self.check_parameters(spl1, spl1._knot_names, \"knot\", value3, True)\n\n # Check knot parameter -> knot vector link\n self.update_parameters(spl0, spl0._knot_names, 3)\n self.update_parameters(spl1, spl1._knot_names, 4)\n assert (spl0._t[:] == 3).all()\n assert (spl1._t[:] == 4).all()\n\n # Check all coeff names created\n assert len(spl0._coeff_names) == 18\n assert len(spl1._coeff_names) == 21\n\n # Check coeff base values\n def value4(idx):\n return 0\n self.check_parameters(spl0, spl0._coeff_names, \"coeff\", value4, False)\n self.check_parameters(spl1, spl1._coeff_names, \"coeff\", value4, False)\n\n # Check coeff vector -> coeff parameter 
link\n c0 = 17 * np.arange(18) + 14\n c1 = 37 * np.arange(21) + 47\n spl0._c[:] = c0.copy()\n spl1._c[:] = c1.copy()\n\n def value5(idx):\n return c0[idx]\n self.check_parameters(spl0, spl0._coeff_names, \"coeff\", value5, False)\n\n def value6(idx):\n return c1[idx]\n self.check_parameters(spl1, spl1._coeff_names, \"coeff\", value6, False)\n\n # Check coeff parameter -> coeff vector link\n self.update_parameters(spl0, spl0._coeff_names, 5)\n self.update_parameters(spl1, spl1._coeff_names, 6)\n assert (spl0._t[:] == 3).all()\n assert (spl1._t[:] == 4).all()\n assert (spl0._c[:] == 5).all()\n assert (spl1._c[:] == 6).all()\n\n def test__knot_names(self):\n # no parameters\n spl = Spline1D()\n assert spl._knot_names == ()\n\n # some parameters\n knot_names = [f\"knot{idx}\" for idx in range(18)]\n\n spl = Spline1D(10)\n assert spl._knot_names == tuple(knot_names)\n\n def test__coeff_names(self):\n # no parameters\n spl = Spline1D()\n assert spl._coeff_names == ()\n\n # some parameters\n coeff_names = [f\"coeff{idx}\" for idx in range(18)]\n\n spl = Spline1D(10)\n assert spl._coeff_names == tuple(coeff_names)\n\n def test_param_names(self):\n # no parameters\n spl = Spline1D()\n assert spl.param_names == ()\n\n # some parameters\n knot_names = [f\"knot{idx}\" for idx in range(18)]\n coeff_names = [f\"coeff{idx}\" for idx in range(18)]\n param_names = knot_names + coeff_names\n\n spl = Spline1D(10)\n assert spl.param_names == tuple(param_names)\n\n def test_t(self):\n # no parameters\n spl = Spline1D()\n # test get\n assert spl._t is None\n assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()\n # test set\n with pytest.raises(ValueError) as err:\n spl.t = mk.MagicMock()\n assert str(err.value) == \"The model parameters must be initialized before setting knots.\"\n\n # with parameters\n spl = Spline1D(10)\n # test get\n t = np.zeros(18)\n t[-4:] = 1\n assert (spl._t == t).all()\n assert (spl.t == t).all()\n # test set\n spl.t = (np.arange(18) + 15)\n assert (spl._t == 
(np.arange(18) + 15)).all()\n assert (spl.t == (np.arange(18) + 15)).all()\n assert (spl.t != t).all()\n # set error\n for idx in range(30):\n if idx == 18:\n continue\n with pytest.raises(ValueError) as err:\n spl.t = np.arange(idx)\n assert str(err.value) == \"There must be exactly as many knots as previously defined.\"\n\n def test_c(self):\n # no parameters\n spl = Spline1D()\n # test get\n assert spl._c is None\n assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()\n # test set\n with pytest.raises(ValueError) as err:\n spl.c = mk.MagicMock()\n assert str(err.value) == \"The model parameters must be initialized before setting coeffs.\"\n\n # with parameters\n spl = Spline1D(10)\n # test get\n assert (spl._c == np.zeros(18)).all()\n assert (spl.c == np.zeros(18)).all()\n # test set\n spl.c = (np.arange(18) + 15)\n assert (spl._c == (np.arange(18) + 15)).all()\n assert (spl.c == (np.arange(18) + 15)).all()\n assert (spl.c != np.zeros(18)).all()\n # set error\n for idx in range(30):\n if idx == 18:\n continue\n with pytest.raises(ValueError) as err:\n spl.c = np.arange(idx)\n assert str(err.value) == \"There must be exactly as many coeffs as previously defined.\"\n\n def test_degree(self):\n # default degree\n spl = Spline1D()\n # test get\n assert spl._degree == 3\n assert spl.degree == 3\n # test set\n\n # non-default degree\n spl = Spline1D(degree=2)\n # test get\n assert spl._degree == 2\n assert spl.degree == 2\n\n def test__initialized(self):\n # no parameters\n spl = Spline1D()\n assert spl._initialized is False\n\n # with parameters\n spl = Spline1D(knots=10, degree=2)\n assert spl._initialized is True\n\n def test_tck(self):\n # no parameters\n spl = Spline1D()\n # test get\n assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()\n assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()\n assert spl.degree == 3\n tck = spl.tck\n assert (tck[0] == spl.t).all()\n assert (tck[1] == spl.c).all()\n assert tck[2] == spl.degree\n # test set\n assert spl._t is None\n assert 
spl._c is None\n assert spl._knot_names == ()\n assert spl._coeff_names == ()\n t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])\n np.random.seed(619)\n c = np.random.random(12)\n k = 3\n spl.tck = (t, c, k)\n assert (spl._t == t).all()\n assert (spl._c == c).all()\n assert spl.degree == k\n\n def value0(idx):\n return t[idx]\n self.check_parameters(spl, spl._knot_names, \"knot\", value0, True)\n\n def value1(idx):\n return c[idx]\n self.check_parameters(spl, spl._coeff_names, \"coeff\", value1, False)\n\n # with parameters\n spl = Spline1D(knots=10, degree=2)\n # test get\n t = np.zeros(16)\n t[-3:] = 1\n assert (spl.t == t).all()\n assert (spl.c == np.zeros(16)).all()\n assert spl.degree == 2\n tck = spl.tck\n assert (tck[0] == spl.t).all()\n assert (tck[1] == spl.c).all()\n assert tck[2] == spl.degree\n # test set\n t = 5*np.arange(16) + 11\n c = 7*np.arange(16) + 13\n k = 2\n spl.tck = (t, c, k)\n assert (spl.t == t).all()\n assert (spl.c == c).all()\n assert spl.degree == k\n tck = spl.tck\n assert (tck[0] == spl.t).all()\n assert (tck[1] == spl.c).all()\n assert tck[2] == spl.degree\n\n # Error\n with pytest.raises(ValueError) as err:\n spl.tck = (t, c, 4)\n assert str(err.value) == \"tck has incompatible degree!\"\n\n def test_bspline(self):\n from scipy.interpolate import BSpline\n\n # no parameters\n spl = Spline1D()\n bspline = spl.bspline\n\n assert isinstance(bspline, BSpline)\n assert (bspline.tck[0] == spl.tck[0]).all()\n assert (bspline.tck[1] == spl.tck[1]).all()\n assert bspline.tck[2] == spl.tck[2]\n\n t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])\n np.random.seed(619)\n c = np.random.random(12)\n k = 3\n\n def value0(idx):\n return t[idx]\n\n def value1(idx):\n return c[idx]\n\n # set (bspline)\n spl = Spline1D()\n assert spl._t is None\n assert spl._c is None\n assert spl._knot_names == ()\n assert spl._coeff_names == ()\n bspline = BSpline(t, c, k)\n spl.bspline = bspline\n assert (spl._t == t).all()\n assert (spl._c == c).all()\n assert 
spl.degree == k\n self.check_parameters(spl, spl._knot_names, \"knot\", value0, True)\n self.check_parameters(spl, spl._coeff_names, \"coeff\", value1, False)\n\n # set (tuple spline)\n spl = Spline1D()\n assert spl._t is None\n assert spl._c is None\n assert spl._knot_names == ()\n assert spl._coeff_names == ()\n spl.bspline = (t, c, k)\n assert (spl._t == t).all()\n assert (spl._c == c).all()\n assert spl.degree == k\n self.check_parameters(spl, spl._knot_names, \"knot\", value0, True)\n self.check_parameters(spl, spl._coeff_names, \"coeff\", value1, False)\n\n # with parameters\n spl = Spline1D(knots=10, degree=2)\n bspline = spl.bspline\n\n assert isinstance(bspline, BSpline)\n assert (bspline.tck[0] == spl.tck[0]).all()\n assert (bspline.tck[1] == spl.tck[1]).all()\n assert bspline.tck[2] == spl.tck[2]\n\n def test_knots(self):\n # no parameters\n spl = Spline1D()\n assert spl.knots == []\n\n # with parameters\n spl = Spline1D(10)\n knots = spl.knots\n assert len(knots) == 18\n\n for knot in knots:\n assert isinstance(knot, Parameter)\n assert hasattr(spl, knot.name)\n assert getattr(spl, knot.name) == knot\n\n def test_coeffs(self):\n # no parameters\n spl = Spline1D()\n assert spl.coeffs == []\n\n # with parameters\n spl = Spline1D(10)\n coeffs = spl.coeffs\n assert len(coeffs) == 18\n\n for coeff in coeffs:\n assert isinstance(coeff, Parameter)\n assert hasattr(spl, coeff.name)\n assert getattr(spl, coeff.name) == coeff\n\n def test__init_parameters(self):\n spl = Spline1D()\n\n with mk.patch.object(Spline1D, '_create_parameters',\n autospec=True) as mkCreate:\n spl._init_parameters()\n assert mkCreate.call_args_list == [\n mk.call(spl, \"knot\", \"t\", fixed=True),\n mk.call(spl, \"coeff\", \"c\")\n ]\n\n def test__init_bounds(self):\n spl = Spline1D()\n\n has_bounds, lower, upper = spl._init_bounds()\n assert has_bounds is False\n assert (lower == [0, 0, 0, 0]).all()\n assert (upper == [1, 1, 1, 1]).all()\n assert spl._user_bounding_box is None\n\n 
has_bounds, lower, upper = spl._init_bounds((-5, 5))\n assert has_bounds is True\n assert (lower == [-5, -5, -5, -5]).all()\n assert (upper == [5, 5, 5, 5]).all()\n assert spl._user_bounding_box == (-5, 5)\n\n def test__init_knots(self):\n np.random.seed(19)\n lower = np.random.random(4)\n upper = np.random.random(4)\n\n # Integer\n with mk.patch.object(Spline1D, \"bspline\",\n new_callable=mk.PropertyMock) as mkBspline:\n spl = Spline1D()\n assert spl._t is None\n spl._init_knots(10, mk.MagicMock(), lower, upper)\n t = np.concatenate((lower, np.zeros(10), upper))\n assert (spl._t == t).all()\n assert mkBspline.call_args_list == [mk.call()]\n\n # vector with bounds\n with mk.patch.object(Spline1D, \"bspline\",\n new_callable=mk.PropertyMock) as mkBspline:\n knots = np.random.random(10)\n spl = Spline1D()\n assert spl._t is None\n spl._init_knots(knots, True, lower, upper)\n t = np.concatenate((lower, knots, upper))\n assert (spl._t == t).all()\n assert mkBspline.call_args_list == [mk.call()]\n\n # vector with no bounds\n with mk.patch.object(Spline1D, \"bspline\",\n new_callable=mk.PropertyMock) as mkBspline:\n knots = np.random.random(10)\n spl = Spline1D()\n assert spl._t is None\n spl._init_knots(knots, False, lower, upper)\n assert (spl._t == knots).all()\n assert mkBspline.call_args_list == [mk.call()]\n\n # error\n for num in range(8):\n knots = np.random.random(num)\n spl = Spline1D()\n assert spl._t is None\n with pytest.raises(ValueError) as err:\n spl._init_knots(knots, False, lower, upper)\n assert str(err.value) == \"Must have at least 8 knots.\"\n\n # Error\n spl = Spline1D()\n assert spl._t is None\n with pytest.raises(ValueError) as err:\n spl._init_knots(0.5, False, lower, upper)\n assert str(err.value) == \"Knots: 0.5 must be iterable or value\"\n\n def test__init_coeffs(self):\n np.random.seed(492)\n # No coeffs\n with mk.patch.object(Spline1D, \"bspline\",\n new_callable=mk.PropertyMock) as mkBspline:\n spl = Spline1D()\n assert spl._c is None\n 
spl._t = [1, 2, 3, 4]\n spl._init_coeffs()\n assert (spl._c == [0, 0, 0, 0]).all()\n assert mkBspline.call_args_list == [mk.call()]\n\n # Some coeffs\n with mk.patch.object(Spline1D, \"bspline\",\n new_callable=mk.PropertyMock) as mkBspline:\n coeffs = np.random.random(10)\n spl = Spline1D()\n assert spl._c is None\n spl._init_coeffs(coeffs)\n assert (spl._c == coeffs).all()\n assert mkBspline.call_args_list == [mk.call()]\n\n def test__init_data(self):\n spl = Spline1D()\n\n knots = mk.MagicMock()\n coeffs = mk.MagicMock()\n bounds = mk.MagicMock()\n has_bounds = mk.MagicMock()\n lower = mk.MagicMock()\n upper = mk.MagicMock()\n with mk.patch.object(Spline1D, '_init_bounds', autospec=True,\n return_value=(has_bounds, lower, upper)) as mkBounds:\n with mk.patch.object(Spline1D, '_init_knots',\n autospec=True) as mkKnots:\n with mk.patch.object(Spline1D, '_init_coeffs',\n autospec=True) as mkCoeffs:\n main = mk.MagicMock()\n main.attach_mock(mkBounds, 'bounds')\n main.attach_mock(mkKnots, 'knots')\n main.attach_mock(mkCoeffs, 'coeffs')\n\n spl._init_data(knots, coeffs, bounds)\n assert main.mock_calls == [\n mk.call.bounds(spl, bounds),\n mk.call.knots(spl, knots, has_bounds, lower, upper),\n mk.call.coeffs(spl, coeffs)\n ]\n\n def test_evaluate(self):\n spl = Spline1D()\n\n args = tuple(mk.MagicMock() for _ in range(3))\n kwargs = {f\"test{idx}\": mk.MagicMock() for idx in range(3)}\n new_kwargs = {f\"new_test{idx}\": mk.MagicMock() for idx in range(3)}\n\n with mk.patch.object(_Spline, 'evaluate', autospec=True,\n return_value=new_kwargs) as mkEval:\n with mk.patch.object(Spline1D, \"bspline\",\n new_callable=mk.PropertyMock) as mkBspline:\n assert mkBspline.return_value.return_value == spl.evaluate(*args, **kwargs)\n assert mkBspline.return_value.call_args_list == [mk.call(args[0], **new_kwargs)]\n assert mkBspline.call_args_list == [mk.call()]\n assert mkEval.call_args_list == [mk.call(spl, *args, **kwargs)]\n\n # Error\n for idx in range(5, 8):\n with 
mk.patch.object(_Spline, 'evaluate', autospec=True,\n return_value={'nu': idx}):\n with pytest.raises(RuntimeError) as err:\n spl.evaluate(*args, **kwargs)\n assert str(err.value) == \"Cannot evaluate a derivative of order higher than 4\"\n\n def check_knots_created(self, spl, k):\n def value0(idx):\n return self.x[0]\n\n def value1(idx):\n return self.x[-1]\n\n for idx in range(k + 1):\n name = f\"knot{idx}\"\n self.check_parameter(spl, \"knot\", name, idx, value0, True)\n\n index = len(spl.t) - (k + 1) + idx\n name = f\"knot{index}\"\n self.check_parameter(spl, \"knot\", name, index, value1, True)\n\n def value3(idx):\n return spl.t[idx]\n\n assert len(spl._knot_names) == len(spl.t)\n for idx, name in enumerate(spl._knot_names):\n assert name == f\"knot{idx}\"\n self.check_parameter(spl, \"knot\", name, idx, value3, True)\n\n def check_coeffs_created(self, spl):\n def value(idx):\n return spl.c[idx]\n\n assert len(spl._coeff_names) == len(spl.c)\n for idx, name in enumerate(spl._coeff_names):\n assert name == f\"coeff{idx}\"\n self.check_parameter(spl, \"coeff\", name, idx, value, False)\n\n @staticmethod\n def check_base_spline(spl, t, c, k):\n \"\"\"Check the base spline form\"\"\"\n if t is None:\n assert spl._t is None\n else:\n assert_allclose(spl._t, t)\n\n if c is None:\n assert spl._c is None\n else:\n assert_allclose(spl._c, c)\n\n assert spl.degree == k\n assert spl._bounding_box is None\n\n def check_spline_fit(self, fit_spl, spline, fitter, atol_fit, atol_truth):\n \"\"\"Check the spline fit\"\"\"\n assert_allclose(fit_spl.t, spline._eval_args[0])\n assert_allclose(fit_spl.c, spline._eval_args[1])\n assert_allclose(fitter.fit_info['spline']._eval_args[0], spline._eval_args[0])\n assert_allclose(fitter.fit_info['spline']._eval_args[1], spline._eval_args[1])\n\n # check that _parameters are correct\n assert len(fit_spl._parameters) == len(fit_spl.t) + len(fit_spl.c)\n assert_allclose(fit_spl._parameters[:len(fit_spl.t)], fit_spl.t)\n 
assert_allclose(fit_spl._parameters[len(fit_spl.t):], fit_spl.c)\n\n # check that parameters are correct\n assert len(fit_spl.parameters) == len(fit_spl.t) + len(fit_spl.c)\n assert_allclose(fit_spl.parameters[:len(fit_spl.t)], fit_spl.t)\n assert_allclose(fit_spl.parameters[len(fit_spl.t):], fit_spl.c)\n\n assert_allclose(spline.get_residual(), fitter.fit_info['resid'])\n\n assert_allclose(fit_spl(self.x), spline(self.x))\n assert_allclose(fit_spl(self.x), fitter.fit_info['spline'](self.x))\n\n assert_allclose(fit_spl(self.x), self.y, atol=atol_fit)\n assert_allclose(fit_spl(self.x), self.truth, atol=atol_truth)\n\n def check_bbox(self, spl, fit_spl, fitter, w, **kwargs):\n \"\"\"Check the spline fit with bbox option\"\"\"\n bbox = [self.x[0], self.x[-1]]\n bbox_spl = fitter(spl, self.x, self.y, weights=w, bbox=bbox, **kwargs)\n assert bbox_spl.bounding_box == tuple(bbox)\n assert_allclose(fit_spl.t, bbox_spl.t)\n assert_allclose(fit_spl.c, bbox_spl.c)\n\n def check_knots_warning(self, fitter, knots, k, w, **kwargs):\n \"\"\"Check that the knots warning is raised\"\"\"\n spl = Spline1D(knots=knots, degree=k)\n with pytest.warns(AstropyUserWarning):\n fitter(spl, self.x, self.y, weights=w, **kwargs)\n\n @pytest.mark.parametrize('w', wieght_tests)\n @pytest.mark.parametrize('k', degree_tests)\n def test_interpolate_fitter(self, w, k):\n fitter = SplineInterpolateFitter()\n assert fitter.fit_info == {'resid': None, 'spline': None}\n\n spl = Spline1D(degree=k)\n self.check_base_spline(spl, None, None, k)\n\n fit_spl = fitter(spl, self.x, self.y, weights=w)\n self.check_base_spline(spl, None, None, k)\n\n assert len(fit_spl.t) == (len(self.x) + k + 1) == len(fit_spl._knot_names)\n self.check_knots_created(fit_spl, k)\n self.check_coeffs_created(fit_spl)\n assert fit_spl._bounding_box is None\n\n from scipy.interpolate import InterpolatedUnivariateSpline, UnivariateSpline\n spline = InterpolatedUnivariateSpline(self.x, self.y, w=w, k=k)\n assert 
isinstance(fitter.fit_info['spline'], UnivariateSpline)\n\n assert spline.get_residual() == 0\n self.check_spline_fit(fit_spl, spline, fitter, 0, 1)\n self.check_bbox(spl, fit_spl, fitter, w)\n\n knots = np.linspace(self.x[0], self.x[-1], len(self.x) + k + 1)\n self.check_knots_warning(fitter, knots, k, w)\n\n @pytest.mark.parametrize('w', wieght_tests)\n @pytest.mark.parametrize('k', degree_tests)\n @pytest.mark.parametrize('s', smoothing_tests)\n def test_smoothing_fitter(self, w, k, s):\n fitter = SplineSmoothingFitter()\n assert fitter.fit_info == {'resid': None, 'spline': None}\n\n spl = Spline1D(degree=k)\n self.check_base_spline(spl, None, None, k)\n\n fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)\n self.check_base_spline(spl, None, None, k)\n\n self.check_knots_created(fit_spl, k)\n self.check_coeffs_created(fit_spl)\n assert fit_spl._bounding_box is None\n\n from scipy.interpolate import UnivariateSpline\n spline = UnivariateSpline(self.x, self.y, w=w, k=k, s=s)\n assert isinstance(fitter.fit_info['spline'], UnivariateSpline)\n\n self.check_spline_fit(fit_spl, spline, fitter, 1, 1)\n self.check_bbox(spl, fit_spl, fitter, w, s=s)\n\n # test warning\n knots = fit_spl.t.copy()\n self.check_knots_warning(fitter, knots, k, w, s=s)\n\n @pytest.mark.parametrize('w', wieght_tests)\n @pytest.mark.parametrize('k', degree_tests)\n def test_exact_knots_fitter(self, w, k):\n fitter = SplineExactKnotsFitter()\n assert fitter.fit_info == {'resid': None, 'spline': None}\n\n knots = [-1, 0, 1]\n t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))\n c = np.zeros(len(t))\n\n # With knots preset\n spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])\n self.check_base_spline(spl, t, c, k)\n assert (spl.t_interior == knots).all()\n\n fit_spl = fitter(spl, self.x, self.y, weights=w)\n self.check_base_spline(spl, t, c, k)\n assert (spl.t_interior == knots).all()\n\n assert len(fit_spl.t) == len(t) == len(fit_spl._knot_names)\n 
self.check_knots_created(fit_spl, k)\n self.check_coeffs_created(fit_spl)\n assert fit_spl._bounding_box is None\n\n from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline\n spline = LSQUnivariateSpline(self.x, self.y, knots, w=w, k=k)\n assert isinstance(fitter.fit_info['spline'], UnivariateSpline)\n\n assert_allclose(spline.get_residual(), 0.1, atol=1)\n assert_allclose(fitter.fit_info['spline'].get_residual(), 0.1, atol=1)\n self.check_spline_fit(fit_spl, spline, fitter, 1, 1)\n self.check_bbox(spl, fit_spl, fitter, w)\n\n # Pass knots via fitter function\n with pytest.warns(AstropyUserWarning):\n fitter(spl, self.x, self.y, t=knots, weights=w)\n\n # pass no knots\n spl = Spline1D(degree=k)\n with pytest.raises(RuntimeError) as err:\n fitter(spl, self.x, self.y, weights=w)\n assert str(err.value) == \"No knots have been provided\"\n\n @pytest.mark.parametrize('w', wieght_tests)\n @pytest.mark.parametrize('k', degree_tests)\n @pytest.mark.parametrize('s', smoothing_tests)\n def test_splrep_fitter_no_knots(self, w, k, s):\n fitter = SplineSplrepFitter()\n assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}\n\n spl = Spline1D(degree=k)\n self.check_base_spline(spl, None, None, k)\n\n fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)\n self.check_base_spline(spl, None, None, k)\n\n self.check_knots_created(fit_spl, k)\n self.check_coeffs_created(fit_spl)\n assert fit_spl._bounding_box is None\n\n from scipy.interpolate import BSpline, splrep\n tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,\n w=w, k=k, s=s, full_output=1)\n assert_allclose(fit_spl.t, tck[0])\n assert_allclose(fit_spl.c, tck[1])\n\n assert fitter.fit_info['fp'] == spline_fp\n assert fitter.fit_info['ier'] == spline_ier\n assert fitter.fit_info['msg'] == spline_msg\n\n spline = BSpline(*tck)\n assert_allclose(fit_spl(self.x), spline(self.x))\n\n assert_allclose(fit_spl(self.x), self.y, atol=1)\n assert_allclose(fit_spl(self.x), self.truth, atol=1)\n\n 
self.check_bbox(spl, fit_spl, fitter, w, s=s)\n\n @pytest.mark.parametrize('w', wieght_tests)\n @pytest.mark.parametrize('k', degree_tests)\n def test_splrep_fitter_with_knots(self, w, k):\n fitter = SplineSplrepFitter()\n assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}\n\n knots = [-1, 0, 1]\n t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))\n c = np.zeros(len(t))\n\n # With knots preset\n spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])\n self.check_base_spline(spl, t, c, k)\n assert (spl.t_interior == knots).all()\n\n fit_spl = fitter(spl, self.x, self.y, weights=w)\n self.check_base_spline(spl, t, c, k)\n assert (spl.t_interior == knots).all()\n\n self.check_knots_created(fit_spl, k)\n self.check_coeffs_created(fit_spl)\n assert fit_spl._bounding_box is None\n\n from scipy.interpolate import BSpline, splrep\n tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,\n w=w, k=k, t=knots, full_output=1)\n assert_allclose(fit_spl.t, tck[0])\n assert_allclose(fit_spl.c, tck[1])\n\n assert fitter.fit_info['fp'] == spline_fp\n assert fitter.fit_info['ier'] == spline_ier\n assert fitter.fit_info['msg'] == spline_msg\n\n spline = BSpline(*tck)\n assert_allclose(fit_spl(self.x), spline(self.x))\n\n assert_allclose(fit_spl(self.x), self.y, atol=1)\n assert_allclose(fit_spl(self.x), self.truth, atol=1)\n\n self.check_bbox(spl, fit_spl, fitter, w)\n\n # test warning\n with pytest.warns(AstropyUserWarning):\n fitter(spl, self.x, self.y, t=knots, weights=w)\n\n # With no knots present\n spl = Spline1D(degree=k)\n self.check_base_spline(spl, None, None, k)\n\n fit_spl = fitter(spl, self.x, self.y, t=knots, weights=w)\n self.check_base_spline(spl, None, None, k)\n\n self.check_knots_created(fit_spl, k)\n self.check_coeffs_created(fit_spl)\n assert fit_spl._bounding_box is None\n\n from scipy.interpolate import BSpline, splrep\n tck = splrep(self.x, self.y, w=w, k=k, t=knots)\n assert_allclose(fit_spl.t, 
tck[0])\n assert_allclose(fit_spl.c, tck[1])\n\n spline = BSpline(*tck)\n assert_allclose(fit_spl(self.x), spline(self.x))\n\n assert_allclose(fit_spl(self.x), self.y, atol=1)\n assert_allclose(fit_spl(self.x), self.truth, atol=1)\n\n self.check_bbox(spl, fit_spl, fitter, w, t=knots)\n\n def generate_spline(self, w=None, bbox=[None]*2, k=None, s=None, t=None):\n if k is None:\n k = 3\n\n from scipy.interpolate import BSpline, splrep\n\n tck = splrep(self.x, self.y, w=w, xb=bbox[0], xe=bbox[1],\n k=k, s=s, t=t)\n\n return BSpline(*tck)\n\n def test_derivative(self):\n bspline = self.generate_spline()\n\n spl = Spline1D()\n spl.bspline = bspline\n assert_allclose(spl.t, bspline.t)\n assert_allclose(spl.c, bspline.c)\n assert spl.degree == bspline.k\n\n # 1st derivative\n d_bspline = bspline.derivative(nu=1)\n assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=1))\n assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=2))\n assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=3))\n assert_allclose(d_bspline(self.xs, nu=3), bspline(self.xs, nu=4))\n\n der = spl.derivative()\n assert_allclose(der.t, d_bspline.t)\n assert_allclose(der.c, d_bspline.c)\n assert der.degree == d_bspline.k == 2\n assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=1))\n assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=2))\n assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=3))\n assert_allclose(der.evaluate(self.xs, nu=3), spl.evaluate(self.xs, nu=4))\n\n # 2nd derivative\n d_bspline = bspline.derivative(nu=2)\n assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=2))\n assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=3))\n assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=4))\n\n der = spl.derivative(nu=2)\n assert_allclose(der.t, d_bspline.t)\n assert_allclose(der.c, d_bspline.c)\n assert der.degree == d_bspline.k == 1\n assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, 
nu=2))\n assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=3))\n assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=4))\n\n # 3rd derivative\n d_bspline = bspline.derivative(nu=3)\n assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=3))\n assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=4))\n\n der = spl.derivative(nu=3)\n assert_allclose(der.t, d_bspline.t)\n assert_allclose(der.c, d_bspline.c)\n assert der.degree == d_bspline.k == 0\n assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=3))\n assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=4))\n\n # Too many derivatives\n for nu in range(4, 9):\n with pytest.raises(ValueError) as err:\n spl.derivative(nu=nu)\n assert str(err.value) == \"Must have nu <= 3\"\n\n def test_antiderivative(self):\n bspline = self.generate_spline()\n\n spl = Spline1D()\n spl.bspline = bspline\n\n # 1st antiderivative\n a_bspline = bspline.antiderivative(nu=1)\n assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=1))\n assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=2))\n assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=3))\n assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=4))\n assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=5))\n\n anti = spl.antiderivative()\n assert_allclose(anti.t, a_bspline.t)\n assert_allclose(anti.c, a_bspline.c)\n assert anti.degree == a_bspline.k == 4\n assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=1))\n assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=2))\n assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=3))\n assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=4))\n assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=5))\n\n # 2nd antiderivative\n a_bspline = bspline.antiderivative(nu=2)\n assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=2))\n 
assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=3))\n assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=4))\n assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=5))\n assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=6))\n\n anti = spl.antiderivative(nu=2)\n assert_allclose(anti.t, a_bspline.t)\n assert_allclose(anti.c, a_bspline.c)\n assert anti.degree == a_bspline.k == 5\n assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=2))\n assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=3))\n assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=4))\n assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=5))\n assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=6))\n\n # Too many anti derivatives\n for nu in range(3, 9):\n with pytest.raises(ValueError) as err:\n spl.antiderivative(nu=nu)\n assert str(err.value) == (\"Supported splines can have max degree 5, \"\n f\"antiderivative degree will be {nu + 3}\")\n\n def test__SplineFitter_error(self):\n spl = Spline1D()\n\n class SplineFitter(_SplineFitter):\n def _fit_method(self, model, x, y, **kwargs):\n super()._fit_method(model, x, y, **kwargs)\n\n fitter = SplineFitter()\n\n with pytest.raises(ValueError) as err:\n fitter(spl, mk.MagicMock(), mk.MagicMock(), mk.MagicMock())\n assert str(err.value) == \"1D model can only have 2 data points.\"\n\n with pytest.raises(ModelDefinitionError) as err:\n fitter(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())\n assert str(err.value) == \"Only spline models are compatible with this fitter.\"\n\n with pytest.raises(NotImplementedError) as err:\n fitter(spl, mk.MagicMock(), mk.MagicMock())\n assert str(err.value) == \"This has not been implemented for _SplineFitter.\"\n"}}},{"rowIdx":1363,"cells":{"hash":{"kind":"string","value":"239a1767ee6e27c90d12c8664dbc2b8573c5c2dfa89142356cf0e67fe6b3593b"},"content":{"kind":"string","value":"# Licensed under a 
3-clause BSD style license - see LICENSE.rst\n\"\"\"\nTests models.parameters\n\"\"\"\n# pylint: disable=invalid-name\n\nimport functools\nimport itertools\nimport unittest.mock as mk\n\nimport numpy as np\nimport pytest\n\nfrom astropy import units as u\nfrom astropy.modeling import fitting, models\nfrom astropy.modeling.core import FittableModel, Model\nfrom astropy.modeling.parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline\nfrom astropy.utils.data import get_pkg_data_filename\n\nfrom . import irafutil\n\n\ndef setter1(val):\n return val\n\n\ndef setter2(val, model):\n model.do_something(val)\n return val * model.p\n\n\nclass SetterModel(FittableModel):\n\n n_inputs = 2\n n_outputs = 1\n\n xc = Parameter(default=1, setter=setter1)\n yc = Parameter(default=1, setter=setter2)\n\n def do_something(self, v):\n pass\n\n def __init__(self, xc, yc, p):\n self.p = p # p is a value intended to be used by the setter\n super().__init__()\n self.xc = xc\n self.yc = yc\n\n def evaluate(self, x, y, xc, yc):\n return (x - xc)**2 + (y - yc)**2\n\n\nclass TParModel(Model):\n \"\"\"\n A toy model to test parameters machinery\n \"\"\"\n\n coeff = Parameter()\n e = Parameter()\n\n def __init__(self, coeff, e, **kwargs):\n super().__init__(coeff=coeff, e=e, **kwargs)\n\n @staticmethod\n def evaluate(coeff, e):\n pass\n\n\nclass MockModel(FittableModel):\n alpha = Parameter(name='alpha', default=42)\n\n @staticmethod\n def evaluate(*args):\n pass\n\n\ndef test__tofloat():\n # iterable\n value = _tofloat([1, 2, 3])\n assert isinstance(value, np.ndarray)\n assert (value == np.array([1, 2, 3])).all()\n assert np.all([isinstance(val, float) for val in value])\n value = _tofloat(np.array([1, 2, 3]))\n assert isinstance(value, np.ndarray)\n assert (value == np.array([1, 2, 3])).all()\n assert np.all([isinstance(val, float) for val in value])\n with pytest.raises(InputParameterError) as err:\n _tofloat('test')\n assert str(err.value) == \"Parameter of could not 
be converted to float\"\n\n # quantity\n assert _tofloat(1 * u.m) == 1 * u.m\n\n # dimensions/scalar array\n value = _tofloat(np.asanyarray(3))\n assert isinstance(value, float)\n assert value == 3\n\n # A regular number\n value = _tofloat(3)\n assert isinstance(value, float)\n assert value == 3\n value = _tofloat(3.0)\n assert isinstance(value, float)\n assert value == 3\n value = _tofloat(np.float32(3))\n assert isinstance(value, float)\n assert value == 3\n value = _tofloat(np.float64(3))\n assert isinstance(value, float)\n assert value == 3\n value = _tofloat(np.int32(3))\n assert isinstance(value, float)\n assert value == 3\n value = _tofloat(np.int64(3))\n assert isinstance(value, float)\n assert value == 3\n\n # boolean\n message = \"Expected parameter to be of numerical type, not boolean\"\n with pytest.raises(InputParameterError) as err:\n _tofloat(True)\n assert str(err.value) == message\n with pytest.raises(InputParameterError) as err:\n _tofloat(False)\n assert str(err.value) == message\n\n # other\n class Value:\n pass\n with pytest.raises(InputParameterError) as err:\n _tofloat(Value)\n assert str(err.value) == \"Don't know how to convert parameter of to float\"\n\n\ndef test_parameter_properties():\n \"\"\"Test if getting / setting of Parameter properties works.\"\"\"\n\n p = Parameter('alpha', default=1)\n\n assert p.name == 'alpha'\n\n # Parameter names are immutable\n with pytest.raises(AttributeError):\n p.name = 'beta'\n\n assert p.fixed is False\n p.fixed = True\n assert p.fixed is True\n\n assert p.tied is False\n p.tied = lambda _: 0\n\n p.tied = False\n assert p.tied is False\n\n assert p.min is None\n p.min = 42\n assert p.min == 42\n p.min = None\n assert p.min is None\n\n assert p.max is None\n p.max = 41\n assert p.max == 41\n\n\ndef test_parameter_operators():\n \"\"\"Test if the parameter arithmetic operators work.\"\"\"\n\n par = Parameter('alpha', default=42)\n num = 42.\n val = 3\n\n assert par - val == num - val\n assert val - par 
== val - num\n assert par / val == num / val\n assert val / par == val / num\n assert par ** val == num ** val\n assert val ** par == val ** num\n assert par < 45\n assert par > 41\n assert par <= par\n assert par >= par\n assert par == par\n assert -par == -num\n assert abs(par) == abs(num)\n\n# Test inherited models\n\n\nclass M1(Model):\n m1a = Parameter(default=1.)\n m1b = Parameter(default=5.)\n\n def evaluate():\n pass\n\n\nclass M2(M1):\n m2c = Parameter(default=11.)\n\n\nclass M3(M2):\n m3d = Parameter(default=20.)\n\n\ndef test_parameter_inheritance():\n mod = M3()\n assert mod.m1a == 1.\n assert mod.m1b == 5.\n assert mod.m2c == 11.\n assert mod.m3d == 20.\n for key in ['m1a', 'm1b', 'm2c', 'm3d']:\n assert key in mod.__dict__\n assert mod.param_names == ('m1a', 'm1b', 'm2c', 'm3d')\n\n\ndef test_param_metric():\n mod = M3()\n assert mod._param_metrics['m1a']['slice'] == slice(0, 1)\n assert mod._param_metrics['m1b']['slice'] == slice(1, 2)\n assert mod._param_metrics['m2c']['slice'] == slice(2, 3)\n assert mod._param_metrics['m3d']['slice'] == slice(3, 4)\n mod._parameters_to_array()\n assert (mod._parameters == np.array([1., 5., 11., 20], dtype=np.float64)).all()\n\n\nclass TestParameters:\n\n def setup_class(self):\n \"\"\"\n Unit tests for parameters\n\n Read an iraf database file created by onedspec.identify. 
Use the
        information to create a 1D Chebyshev model and perform the same fit.

        Create also a gaussian model.
        """
        test_file = get_pkg_data_filename('data/idcompspec.fits')
        f = open(test_file)
        lines = f.read()
        reclist = lines.split("begin")
        f.close()
        record = irafutil.IdentifyRecord(reclist[1])
        self.icoeff = record.coeff
        order = int(record.fields['order'])
        self.model = models.Chebyshev1D(order - 1)
        self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
        self.linear_fitter = fitting.LinearLSQFitter()
        self.x = record.x
        self.y = record.z
        self.yy = np.array([record.z, record.z])

    def test_set_parameters_as_list(self):
        """Tests updating parameters using a list."""

        self.model.parameters = [30, 40, 50, 60, 70]
        assert (self.model.parameters == [30., 40., 50., 60, 70]).all()

    def test_set_parameters_as_array(self):
        """Tests updating parameters using an array."""

        self.model.parameters = np.array([3, 4, 5, 6, 7])
        assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()

    def test_set_as_tuple(self):
        """Tests updating parameters using a tuple."""

        self.model.parameters = (1, 2, 3, 4, 5)
        assert (self.model.parameters == [1, 2, 3, 4, 5]).all()

    def test_set_model_attr_seq(self):
        """
        Tests updating the parameters attribute when a model's
        parameter (in this case coeff) is updated.
        """

        self.model.parameters = [0, 0., 0., 0, 0]
        self.model.c0 = 7
        assert (self.model.parameters == [7, 0., 0., 0, 0]).all()

    def test_set_model_attr_num(self):
        """Update the parameter list when a model's parameter is updated."""

        self.gmodel.amplitude = 7
        assert (self.gmodel.parameters == [7, 3, 4]).all()

    def test_set_item(self):
        """Update the parameters using indexing."""

        self.model.parameters = [1, 2, 3, 4, 5]
        tpar = self.model.parameters
        tpar[0] = 10.
        self.model.parameters = tpar
        assert (self.model.parameters == [10, 2, 3, 4, 5]).all()
        assert self.model.c0 == 10

    def test_wrong_size1(self):
        """
        Tests raising an error when attempting to reset the parameters
        using a list of a different size.
        """

        with pytest.raises(InputParameterError):
            self.model.parameters = [1, 2, 3]

    def test_wrong_size2(self):
        """
        Tests raising an exception when attempting to update a model's
        parameter (in this case coeff) with a sequence of the wrong size.
        """

        with pytest.raises(InputParameterError):
            self.model.c0 = [1, 2, 3]

    def test_wrong_shape(self):
        """
        Tests raising an exception when attempting to update a model's
        parameter and the new value has the wrong shape.
        """

        with pytest.raises(InputParameterError):
            self.gmodel.amplitude = [1, 2]

    def test_par_against_iraf(self):
        """
        Test the fitter modifies model.parameters.

        Uses an iraf example.
        """

        new_model = self.linear_fitter(self.model, self.x, self.y)
        np.testing.assert_allclose(
            new_model.parameters,
            np.array([4826.1066602783685, 952.8943813407858, 12.641236013982386,
                      -1.7910672553339604, 0.90252884366711317]),
            rtol=10 ** (-2))

    def testPolynomial1D(self):
        """Polynomial1D accepts coefficients via keyword arguments."""
        d = {'c0': 11, 'c1': 12, 'c2': 13, 'c3': 14}
        p1 = models.Polynomial1D(3, **d)
        np.testing.assert_equal(p1.parameters, [11, 12, 13, 14])

    def test_poly1d_multiple_sets(self):
        """Per-coefficient assignment updates the flat array for n_models > 1."""
        p1 = models.Polynomial1D(3, n_models=3)
        np.testing.assert_equal(p1.parameters, [0.0, 0.0, 0.0, 0, 0, 0,
                                                0, 0, 0, 0, 0, 0])
        np.testing.assert_array_equal(p1.c0, [0, 0, 0])
        p1.c0 = [10, 10, 10]
        np.testing.assert_equal(p1.parameters, [10.0, 10.0, 10.0, 0, 0,
                                                0, 0, 0, 0, 0, 0, 0])

    def test_par_slicing(self):
        """
        Test assigning to a parameter slice
        """
        p1 = models.Polynomial1D(3, n_models=3)
        p1.c0[:2] = [10, 10]
        np.testing.assert_equal(p1.parameters, [10.0, 10.0, 0.0, 0, 0,
                                                0, 0, 0, 0, 0, 0, 0])

    def test_poly2d(self):
        """Setting a single 2D coefficient is reflected in the parameter array."""
        p2 = models.Polynomial2D(degree=3)
        p2.c0_0 = 5
        np.testing.assert_equal(p2.parameters,
                                [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])

    def test_poly2d_multiple_sets(self):
        """Polynomial2D with n_models implied by list-valued coefficients."""
        kw = {'c0_0': [2, 3], 'c1_0': [1, 2], 'c2_0': [4, 5],
              'c0_1': [1, 1], 'c0_2': [2, 2], 'c1_1': [5, 5]}
        p2 = models.Polynomial2D(2, **kw)
        np.testing.assert_equal(p2.parameters, [2, 3, 1, 2, 4, 5,
                                                1, 1, 2, 2, 5, 5])

    def test_shift_model_parameters1d(self):
        """Scalar parameter assignment on a Shift model."""
        sh1 = models.Shift(2)
        sh1.offset = 3
        assert sh1.offset == 3
        assert sh1.offset.value == 3

    def test_scale_model_parametersnd(self):
        """Array-valued parameter assignment on a Scale model."""
        sc1 = models.Scale([2, 2])
        sc1.factor = [3, 3]
        assert np.all(sc1.factor == [3, 3])
        np.testing.assert_array_equal(sc1.factor.value, [3, 3])

    def test_bounds(self):
        """Bounds via ``bounds=`` or ``min``/``max``; conflicting specs raise."""
        # Valid __init__
        param = Parameter(bounds=(1, 2))
        assert param.bounds == (1, 2)
        param = Parameter(min=1, max=2)
        assert param.bounds == (1, 2)

        # Errors __init__
        message = ("bounds may not be specified simultaneously with min or max"
                   " when instantiating Parameter test")
        with pytest.raises(ValueError) as err:
            Parameter(bounds=(1, 2), min=1, name='test')
        assert str(err.value) == message
        with pytest.raises(ValueError) as err:
            Parameter(bounds=(1, 2), max=2, name='test')
        assert str(err.value) == message
        with pytest.raises(ValueError) as err:
            Parameter(bounds=(1, 2), min=1, max=2, name='test')
        assert str(err.value) == message

        # Setters
        param = Parameter(name='test', default=[1, 2, 3, 4])
        assert param.bounds == (None, None) == param._bounds

        # Set errors
        with pytest.raises(TypeError) as err:
            param.bounds = ('test', None)
        assert str(err.value) == "Min value must be a number or a Quantity"
        with pytest.raises(TypeError) as err:
            param.bounds = (None, 'test')
        assert str(err.value) == "Max value must be a number or a Quantity"

        # Set number
        param.bounds = (1, 2)
        assert param.bounds == (1, 2) == param._bounds

        # Set Quantity
        param.bounds = (1 * u.m, 2 * u.m)
        assert param.bounds == (1, 2) == param._bounds

    def test_modify_value(self):
        """Item/slice assignment on a Parameter value, including error cases."""
        param = Parameter(name='test', default=[1, 2, 3])
        assert (param.value == [1, 2, 3]).all()

        # Errors
        with pytest.raises(InputParameterError) as err:
            param[slice(0, 0)] = 2
        assert str(err.value) == "Slice assignment outside the parameter dimensions for 'test'"

        with pytest.raises(InputParameterError) as err:
            param[3] = np.array([5])
        assert str(err.value) == "Input dimension 3 invalid for 'test' parameter with dimension 1"

        # assignment of a slice
        param[slice(0, 2)] = [4, 5]
        assert (param.value == [4, 5, 3]).all()

        # assignment of a value
        param[2] = 6
        assert (param.value == [4, 5, 6]).all()

    def test__set_unit(self):
        """``_set_unit`` only attaches units when forced; direct changes raise."""
        param = Parameter(name='test', default=[1, 2, 3])
        assert param.unit is None

        # No force Error (no existing unit)
        with pytest.raises(ValueError) as err:
            param._set_unit(u.m)
        assert str(err.value) == ("Cannot attach units to parameters that were "
                                  "not initially specified with units")

        # Force
        param._set_unit(u.m, True)
        assert param.unit == u.m

        # No force Error (existing unit)
        with pytest.raises(ValueError) as err:
            param._set_unit(u.K)
        assert str(err.value) == ("Cannot change the unit attribute directly, instead change the "
                                  "parameter to a new quantity")

    def test_quantity(self):
        """``quantity`` is None without a unit, else value combined with unit."""
        param = Parameter(name='test', default=[1, 2, 3])
        assert param.unit is None
        assert param.quantity is None

        param = Parameter(name='test', default=[1, 2, 3], unit=u.m)
        assert param.unit == u.m
        assert (param.quantity == np.array([1, 2, 3]) * u.m).all()

    def test_shape(self):
        """Reshaping a parameter value; invalid sizes raise."""
        # Array like
        param = Parameter(name='test', default=[1, 2, 3, 4])
        assert param.shape == (4,)
        # Reshape error
        with pytest.raises(ValueError) as err:
            param.shape = (5,)
        assert str(err.value) == "cannot reshape array of size 4 into shape (5,)"
        # Reshape success
        param.shape = (2, 2)
        assert param.shape == (2, 2)
        assert (param.value == [[1, 2], [3, 4]]).all()

        # Scalar
        param = Parameter(name='test',
default=1)\n assert param.shape == ()\n # Reshape error\n with pytest.raises(ValueError) as err:\n param.shape = (5,)\n assert str(err.value) == \"Cannot assign this shape to a scalar quantity\"\n param.shape = (1,)\n\n # single value\n param = Parameter(name='test', default=np.array([1]))\n assert param.shape == (1,)\n # Reshape error\n with pytest.raises(ValueError) as err:\n param.shape = (5,)\n assert str(err.value) == \"Cannot assign this shape to a scalar quantity\"\n param.shape = ()\n\n def test_size(self):\n param = Parameter(name='test', default=[1, 2, 3, 4])\n assert param.size == 4\n\n param = Parameter(name='test', default=[1])\n assert param.size == 1\n\n param = Parameter(name='test', default=1)\n assert param.size == 1\n\n def test_std(self):\n param = Parameter(name='test', default=[1, 2, 3, 4])\n assert param.std is None\n assert param._std is None\n\n param.std = 5\n assert param.std == 5 == param._std\n\n def test_fixed(self):\n param = Parameter(name='test', default=[1, 2, 3, 4])\n assert param.fixed is False\n assert param._fixed is False\n\n # Set error\n with pytest.raises(ValueError) as err:\n param.fixed = 3\n assert str(err.value) == \"Value must be boolean\"\n\n # Set\n param.fixed = True\n assert param.fixed is True\n assert param._fixed is True\n\n def test_tied(self):\n param = Parameter(name='test', default=[1, 2, 3, 4])\n assert param.tied is False\n assert param._tied is False\n\n # Set error\n with pytest.raises(TypeError) as err:\n param.tied = mk.NonCallableMagicMock()\n assert str(err.value) == \"Tied must be a callable or set to False or None\"\n\n # Set None\n param.tied = None\n assert param.tied is None\n assert param._tied is None\n\n # Set False\n param.tied = False\n assert param.tied is False\n assert param._tied is False\n\n # Set other\n tied = mk.MagicMock()\n param.tied = tied\n assert param.tied == tied == param._tied\n\n def test_validator(self):\n param = Parameter(name='test', default=[1, 2, 3, 4])\n assert 
param._validator is None\n\n valid = mk.MagicMock()\n param.validator(valid)\n assert param._validator == valid\n\n with pytest.raises(ValueError) as err:\n param.validator(mk.NonCallableMagicMock())\n assert str(err.value) == (\"This decorator method expects a callable.\\n\"\n \"The use of this method as a direct validator is\\n\"\n \"deprecated; use the new validate method instead\\n\")\n\n def test_validate(self):\n param = Parameter(name='test', default=[1, 2, 3, 4])\n assert param._validator is None\n assert param.model is None\n\n # Run without validator\n param.validate(mk.MagicMock())\n\n # Run with validator but no Model\n validator = mk.MagicMock()\n param.validator(validator)\n assert param._validator == validator\n param.validate(mk.MagicMock())\n assert validator.call_args_list == []\n\n # Full validate\n param._model = mk.MagicMock()\n value = mk.MagicMock()\n param.validate(value)\n assert validator.call_args_list == [mk.call(param._model, value)]\n\n def test_copy(self):\n param = Parameter(name='test', default=[1, 2, 3, 4])\n copy_param = param.copy()\n\n assert (param == copy_param).all()\n assert id(param) != id(copy_param)\n\n def test_model(self):\n param = Parameter(name='test', default=[1, 2, 3, 4])\n assert param.model is None\n assert param._model is None\n assert param._model_required is False\n assert (param._value == [1, 2, 3, 4]).all()\n\n setter = mk.MagicMock()\n getter = mk.MagicMock()\n param._setter = setter\n param._getter = getter\n\n # No Model Required\n param._value = [5, 6, 7, 8]\n model0 = mk.MagicMock()\n setter0 = mk.MagicMock()\n getter0 = mk.MagicMock()\n with mk.patch.object(Parameter, '_create_value_wrapper',\n side_effect=[setter0, getter0]) as mkCreate:\n param.model = model0\n assert param.model == model0 == param._model\n assert param._setter == setter0\n assert param._getter == getter0\n assert mkCreate.call_args_list == [\n mk.call(setter, model0),\n mk.call(getter, model0)\n ]\n assert param._value == [5, 6, 7, 
8]\n\n param._setter = setter\n param._getter = getter\n\n # Model required\n param._model_required = True\n model1 = mk.MagicMock()\n setter1 = mk.MagicMock()\n getter1 = mk.MagicMock()\n setter1.return_value = [9, 10, 11, 12]\n getter1.return_value = [9, 10, 11, 12]\n with mk.patch.object(Parameter, '_create_value_wrapper',\n side_effect=[setter1, getter1]) as mkCreate:\n param.model = model1\n assert param.model == model1 == param._model\n assert param._setter == setter1\n assert param._getter == getter1\n assert mkCreate.call_args_list == [\n mk.call(setter, model1),\n mk.call(getter, model1)\n ]\n assert (param.value == [9, 10, 11, 12]).all()\n\n param._setter = setter\n param._getter = getter\n param._default = None\n with mk.patch.object(Parameter, '_create_value_wrapper',\n side_effect=[setter1, getter1]) as mkCreate:\n param.model = model1\n assert param.model == model1 == param._model\n assert param._setter == setter1\n assert param._getter == getter1\n assert mkCreate.call_args_list == [\n mk.call(setter, model1),\n mk.call(getter, model1)\n ]\n assert param._value is None\n\n def test_raw_value(self):\n param = Parameter(name='test', default=[1, 2, 3, 4])\n\n # Normal case\n assert (param._raw_value == param.value).all()\n\n # Bad setter\n param._setter = True\n param._internal_value = 4\n assert param._raw_value == 4\n\n def test__create_value_wrapper(self):\n param = Parameter(name='test', default=[1, 2, 3, 4])\n\n # Bad ufunc\n with pytest.raises(TypeError) as err:\n param._create_value_wrapper(np.add, mk.MagicMock())\n assert str(err.value) == (\"A numpy.ufunc used for Parameter getter/setter \"\n \"may only take one input argument\")\n # Good ufunc\n assert param._create_value_wrapper(np.negative, mk.MagicMock()) == np.negative\n\n # None\n assert param._create_value_wrapper(None, mk.MagicMock()) is None\n\n # wrapper with one argument\n def wrapper1(a):\n pass\n assert param._create_value_wrapper(wrapper1, mk.MagicMock()) == wrapper1\n\n # wrapper 
with two argument2\n def wrapper2(a, b):\n pass\n # model is None\n assert param._model_required is False\n assert param._create_value_wrapper(wrapper2, None) == wrapper2\n assert param._model_required is True\n # model is not None\n param._model_required = False\n model = mk.MagicMock()\n with mk.patch.object(functools, 'partial', autospec=True) as mkPartial:\n assert param._create_value_wrapper(wrapper2, model) == mkPartial.return_value\n\n # wrapper with more than 2 arguments\n def wrapper3(a, b, c):\n pass\n with pytest.raises(TypeError) as err:\n param._create_value_wrapper(wrapper3, mk.MagicMock())\n assert str(err.value) == (\"Parameter getter/setter must be a function \"\n \"of either one or two arguments\")\n\n def test_bool(self):\n # single value is true\n param = Parameter(name='test', default=1)\n assert param.value == 1\n assert np.all(param)\n if param:\n assert True\n else:\n assert False\n\n # single value is false\n param = Parameter(name='test', default=0)\n assert param.value == 0\n assert not np.all(param)\n if param:\n assert False\n else:\n assert True\n\n # vector value all true\n param = Parameter(name='test', default=[1, 2, 3, 4])\n assert np.all(param.value == [1, 2, 3, 4])\n assert np.all(param)\n if param:\n assert True\n else:\n assert False\n\n # vector value at least one false\n param = Parameter(name='test', default=[1, 2, 0, 3, 4])\n assert np.all(param.value == [1, 2, 0, 3, 4])\n assert not np.all(param)\n if param:\n assert False\n else:\n assert True\n\n def test_param_repr_oneline(self):\n # Single value no units\n param = Parameter(name='test', default=1)\n assert param_repr_oneline(param) == '1.'\n\n # Vector value no units\n param = Parameter(name='test', default=[1, 2, 3, 4])\n assert param_repr_oneline(param) == '[1., 2., 3., 4.]'\n\n # Single value units\n param = Parameter(name='test', default=1*u.m)\n assert param_repr_oneline(param) == '1. 
m'\n\n # Vector value units\n param = Parameter(name='test', default=[1, 2, 3, 4] * u.m)\n assert param_repr_oneline(param) == '[1., 2., 3., 4.] m'\n\n\nclass TestMultipleParameterSets:\n\n def setup_class(self):\n self.x1 = np.arange(1, 10, .1)\n self.y, self.x = np.mgrid[:10, :7]\n self.x11 = np.array([self.x1, self.x1]).T\n self.gmodel = models.Gaussian1D([12, 10], [3.5, 5.2], stddev=[.4, .7],\n n_models=2)\n\n def test_change_par(self):\n \"\"\"\n Test that a change to one parameter as a set propagates to param_sets.\n \"\"\"\n self.gmodel.amplitude = [1, 10]\n np.testing.assert_almost_equal(\n self.gmodel.param_sets,\n np.array([[1.,\n 10],\n [3.5,\n 5.2],\n [0.4,\n 0.7]]))\n np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7])\n\n def test_change_par2(self):\n \"\"\"\n Test that a change to one single parameter in a set propagates to\n param_sets.\n \"\"\"\n self.gmodel.amplitude[0] = 11\n np.testing.assert_almost_equal(\n self.gmodel.param_sets,\n np.array([[11.,\n 10],\n [3.5,\n 5.2],\n [0.4,\n 0.7]]))\n np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7])\n\n def test_change_parameters(self):\n self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7]\n np.testing.assert_almost_equal(self.gmodel.amplitude.value, [13., 10.])\n np.testing.assert_almost_equal(self.gmodel.mean.value, [9., 5.2])\n\n\nclass TestParameterInitialization:\n \"\"\"\n This suite of tests checks most if not all cases if instantiating a model\n with parameters of different shapes/sizes and with different numbers of\n parameter sets.\n \"\"\"\n\n def test_single_model_scalar_parameters(self):\n t = TParModel(10, 1)\n assert len(t) == 1\n assert t.model_set_axis is False\n assert np.all(t.param_sets == [[10], [1]])\n assert np.all(t.parameters == [10, 1])\n assert t.coeff.shape == ()\n assert t.e.shape == ()\n\n def test_single_model_scalar_and_array_parameters(self):\n t = TParModel(10, [1, 2])\n assert len(t) == 1\n assert t.model_set_axis is False\n assert 
np.issubdtype(t.param_sets.dtype, np.object_)\n assert len(t.param_sets) == 2\n assert np.all(t.param_sets[0] == [10])\n assert np.all(t.param_sets[1] == [[1, 2]])\n assert np.all(t.parameters == [10, 1, 2])\n assert t.coeff.shape == ()\n assert t.e.shape == (2,)\n\n def test_single_model_1d_array_parameters(self):\n t = TParModel([10, 20], [1, 2])\n assert len(t) == 1\n assert t.model_set_axis is False\n assert np.all(t.param_sets == [[[10, 20]], [[1, 2]]])\n assert np.all(t.parameters == [10, 20, 1, 2])\n assert t.coeff.shape == (2,)\n assert t.e.shape == (2,)\n\n def test_single_model_1d_array_different_length_parameters(self):\n with pytest.raises(InputParameterError):\n # Not broadcastable\n TParModel([1, 2], [3, 4, 5])\n\n def test_single_model_2d_array_parameters(self):\n t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]])\n assert len(t) == 1\n assert t.model_set_axis is False\n assert np.all(t.param_sets == [[[[10, 20], [30, 40]]],\n [[[1, 2], [3, 4]]]])\n assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])\n assert t.coeff.shape == (2, 2)\n assert t.e.shape == (2, 2)\n\n def test_single_model_2d_non_square_parameters(self):\n coeff = np.array([[10, 20], [30, 40], [50, 60]])\n e = np.array([[1, 2], [3, 4], [5, 6]])\n\n t = TParModel(coeff, e)\n assert len(t) == 1\n assert t.model_set_axis is False\n assert np.all(t.param_sets == [[[[10, 20], [30, 40], [50, 60]]],\n [[[1, 2], [3, 4], [5, 6]]]])\n assert np.all(t.parameters == [10, 20, 30, 40, 50, 60,\n 1, 2, 3, 4, 5, 6])\n assert t.coeff.shape == (3, 2)\n assert t.e.shape == (3, 2)\n\n t2 = TParModel(coeff.T, e.T)\n assert len(t2) == 1\n assert t2.model_set_axis is False\n assert np.all(t2.param_sets == [[[[10, 30, 50], [20, 40, 60]]],\n [[[1, 3, 5], [2, 4, 6]]]])\n assert np.all(t2.parameters == [10, 30, 50, 20, 40, 60,\n 1, 3, 5, 2, 4, 6])\n assert t2.coeff.shape == (2, 3)\n assert t2.e.shape == (2, 3)\n\n # Not broadcastable\n with pytest.raises(InputParameterError):\n TParModel(coeff, e.T)\n\n 
with pytest.raises(InputParameterError):\n TParModel(coeff.T, e)\n\n def test_single_model_2d_broadcastable_parameters(self):\n t = TParModel([[10, 20, 30], [40, 50, 60]], [1, 2, 3])\n assert len(t) == 1\n assert t.model_set_axis is False\n assert len(t.param_sets) == 2\n assert np.issubdtype(t.param_sets.dtype, np.object_)\n assert np.all(t.param_sets[0] == [[[10, 20, 30], [40, 50, 60]]])\n assert np.all(t.param_sets[1] == [[1, 2, 3]])\n assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3])\n\n @pytest.mark.parametrize(('p1', 'p2'), [\n (1, 2), (1, [2, 3]), ([1, 2], 3), ([1, 2, 3], [4, 5]),\n ([1, 2], [3, 4, 5])])\n def test_two_model_incorrect_scalar_parameters(self, p1, p2):\n with pytest.raises(InputParameterError):\n TParModel(p1, p2, n_models=2)\n\n @pytest.mark.parametrize('kwargs', [\n {'n_models': 2}, {'model_set_axis': 0},\n {'n_models': 2, 'model_set_axis': 0}])\n def test_two_model_scalar_parameters(self, kwargs):\n t = TParModel([10, 20], [1, 2], **kwargs)\n assert len(t) == 2\n assert t.model_set_axis == 0\n assert np.all(t.param_sets == [[10, 20], [1, 2]])\n assert np.all(t.parameters == [10, 20, 1, 2])\n assert t.coeff.shape == (2,)\n assert t.e.shape == (2,)\n\n @pytest.mark.parametrize('kwargs', [\n {'n_models': 2}, {'model_set_axis': 0},\n {'n_models': 2, 'model_set_axis': 0}])\n def test_two_model_scalar_and_array_parameters(self, kwargs):\n t = TParModel([10, 20], [[1, 2], [3, 4]], **kwargs)\n assert len(t) == 2\n assert t.model_set_axis == 0\n assert len(t.param_sets) == 2\n assert np.issubdtype(t.param_sets.dtype, np.object_)\n assert np.all(t.param_sets[0] == [[10], [20]])\n assert np.all(t.param_sets[1] == [[1, 2], [3, 4]])\n assert np.all(t.parameters == [10, 20, 1, 2, 3, 4])\n assert t.coeff.shape == (2,)\n assert t.e.shape == (2, 2)\n\n def test_two_model_1d_array_parameters(self):\n t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]], n_models=2)\n assert len(t) == 2\n assert t.model_set_axis == 0\n assert 
np.all(t.param_sets == [[[10, 20], [30, 40]],\n [[1, 2], [3, 4]]])\n assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])\n assert t.coeff.shape == (2, 2)\n assert t.e.shape == (2, 2)\n\n t2 = TParModel([[10, 20, 30], [40, 50, 60]],\n [[1, 2, 3], [4, 5, 6]], n_models=2)\n assert len(t2) == 2\n assert t2.model_set_axis == 0\n assert np.all(t2.param_sets == [[[10, 20, 30], [40, 50, 60]],\n [[1, 2, 3], [4, 5, 6]]])\n assert np.all(t2.parameters == [10, 20, 30, 40, 50, 60,\n 1, 2, 3, 4, 5, 6])\n assert t2.coeff.shape == (2, 3)\n assert t2.e.shape == (2, 3)\n\n def test_two_model_mixed_dimension_array_parameters(self):\n with pytest.raises(InputParameterError):\n # Can't broadcast different array shapes\n TParModel([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\n [[9, 10, 11], [12, 13, 14]], n_models=2)\n\n t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],\n [[1, 2], [3, 4]], n_models=2)\n assert len(t) == 2\n assert t.model_set_axis == 0\n assert len(t.param_sets) == 2\n assert np.issubdtype(t.param_sets.dtype, np.object_)\n assert np.all(t.param_sets[0] == [[[10, 20], [30, 40]],\n [[50, 60], [70, 80]]])\n assert np.all(t.param_sets[1] == [[[1, 2]], [[3, 4]]])\n assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,\n 1, 2, 3, 4])\n assert t.coeff.shape == (2, 2, 2)\n assert t.e.shape == (2, 2)\n\n def test_two_model_2d_array_parameters(self):\n t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],\n [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], n_models=2)\n assert len(t) == 2\n assert t.model_set_axis == 0\n assert np.all(t.param_sets == [[[[10, 20], [30, 40]],\n [[50, 60], [70, 80]]],\n [[[1, 2], [3, 4]],\n [[5, 6], [7, 8]]]])\n assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,\n 1, 2, 3, 4, 5, 6, 7, 8])\n assert t.coeff.shape == (2, 2, 2)\n assert t.e.shape == (2, 2, 2)\n\n def test_two_model_nonzero_model_set_axis(self):\n # An example where the model set axis is the *last* axis of the\n # parameter arrays\n coeff = np.array([[[10, 20, 
30], [30, 40, 50]], [[50, 60, 70], [70, 80, 90]]])\n coeff = np.rollaxis(coeff, 0, 3)\n e = np.array([[1, 2, 3], [3, 4, 5]])\n e = np.rollaxis(e, 0, 2)\n t = TParModel(coeff, e, n_models=2, model_set_axis=-1)\n assert len(t) == 2\n assert t.model_set_axis == -1\n assert len(t.param_sets) == 2\n assert np.issubdtype(t.param_sets.dtype, np.object_)\n assert np.all(t.param_sets[0] == [[[10, 50], [20, 60], [30, 70]],\n [[30, 70], [40, 80], [50, 90]]])\n assert np.all(t.param_sets[1] == [[[1, 3], [2, 4], [3, 5]]])\n assert np.all(t.parameters == [10, 50, 20, 60, 30, 70, 30, 70, 40, 80,\n 50, 90, 1, 3, 2, 4, 3, 5])\n assert t.coeff.shape == (2, 3, 2) # note change in api\n assert t.e.shape == (3, 2) # note change in api\n\n def test_wrong_number_of_params(self):\n with pytest.raises(InputParameterError):\n TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), n_models=2)\n with pytest.raises(InputParameterError):\n TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), model_set_axis=0)\n\n def test_wrong_number_of_params2(self):\n with pytest.raises(InputParameterError):\n TParModel(coeff=[[1, 2], [3, 4]], e=4, n_models=2)\n with pytest.raises(InputParameterError):\n TParModel(coeff=[[1, 2], [3, 4]], e=4, model_set_axis=0)\n\n def test_array_parameter1(self):\n with pytest.raises(InputParameterError):\n TParModel(np.array([[1, 2], [3, 4]]), 1, model_set_axis=0)\n\n def test_array_parameter2(self):\n with pytest.raises(InputParameterError):\n TParModel(np.array([[1, 2], [3, 4]]), (1, 1, 11),\n model_set_axis=0)\n\n def test_array_parameter4(self):\n \"\"\"\n Test multiple parameter model with array-valued parameters of the same\n size as the number of parameter sets.\n \"\"\"\n\n t4 = TParModel([[1, 2], [3, 4]], [5, 6], model_set_axis=False)\n assert len(t4) == 1\n assert t4.coeff.shape == (2, 2)\n assert t4.e.shape == (2,)\n assert np.issubdtype(t4.param_sets.dtype, np.object_)\n assert np.all(t4.param_sets[0] == [[1, 2], [3, 4]])\n assert np.all(t4.param_sets[1] == [5, 6])\n\n\ndef 
test_non_broadcasting_parameters():\n \"\"\"\n Tests that in a model with 3 parameters that do not all mutually broadcast,\n this is determined correctly regardless of what order the parameters are\n in.\n \"\"\"\n\n a = 3\n b = np.array([[1, 2, 3], [4, 5, 6]])\n c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])\n\n class TestModel(Model):\n\n p1 = Parameter()\n p2 = Parameter()\n p3 = Parameter()\n\n def evaluate(self, *args):\n return\n\n # a broadcasts with both b and c, but b does not broadcast with c\n for args in itertools.permutations((a, b, c)):\n with pytest.raises(InputParameterError):\n TestModel(*args)\n\n\ndef test_setter():\n pars = np.random.rand(20).reshape((10, 2))\n\n model = SetterModel(xc=-1, yc=3, p=np.pi)\n\n for x, y in pars:\n np.testing.assert_almost_equal(\n model(x, y),\n (x + 1)**2 + (y - np.pi * 3)**2)\n"}}},{"rowIdx":1364,"cells":{"hash":{"kind":"string","value":"aab09909534fc1531327e6da41fc63cbe186db96a8c7811c3698ebf86cc93116"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# pylint: disable=invalid-name, no-member\n\nimport numpy as np\nimport pytest\n\nfrom astropy import units as u\nfrom astropy.modeling.bounding_box import ModelBoundingBox\nfrom astropy.modeling.core import fix_inputs\nfrom astropy.modeling.fitting import DogBoxLSQFitter, LevMarLSQFitter, LMLSQFitter, TRFLSQFitter\nfrom astropy.modeling.functional_models import (\n AiryDisk2D, ArcCosine1D, ArcSine1D, ArcTangent1D, Box1D, Box2D, Const1D, Const2D, Cosine1D,\n Disk2D, Ellipse2D, Exponential1D, Gaussian1D, Gaussian2D, KingProjectedAnalytic1D, Linear1D,\n Logarithmic1D, Lorentz1D, Moffat1D, Moffat2D, Multiply, Planar2D, RickerWavelet1D,\n RickerWavelet2D, Ring2D, Scale, Sersic1D, Sersic2D, Sine1D, Tangent1D, Trapezoid1D,\n TrapezoidDisk2D, Voigt1D)\nfrom astropy.modeling.parameters import InputParameterError\nfrom astropy.modeling.physical_models import Drude1D, Plummer1D\nfrom astropy.modeling.polynomial import 
Polynomial1D, Polynomial2D\nfrom astropy.modeling.powerlaws import (\n BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D, PowerLaw1D,\n SmoothlyBrokenPowerLaw1D)\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy.utils.compat.optional_deps import HAS_SCIPY\n\nfitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]\n\nFUNC_MODELS_1D = [\n {\n 'class': Gaussian1D,\n 'parameters': {'amplitude': 3 * u.Jy, 'mean': 2 * u.m, 'stddev': 30 * u.cm},\n 'evaluation': [(2600 * u.mm, 3 * u.Jy * np.exp(-2))],\n 'bounding_box': [0.35, 3.65] * u.m\n },\n {\n 'class': Sersic1D,\n 'parameters': {'amplitude': 3 * u.MJy / u.sr, 'r_eff': 2 * u.arcsec, 'n': 4},\n 'evaluation': [(3 * u.arcsec, 1.3237148119468918 * u.MJy/u.sr)],\n 'bounding_box': False\n },\n {\n 'class': Sine1D,\n 'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},\n 'evaluation': [(1 * u.s, -3 * u.km / u.s)],\n 'bounding_box': False\n },\n {\n 'class': Cosine1D,\n 'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.25},\n 'evaluation': [(1 * u.s, -3 * u.km / u.s)],\n 'bounding_box': False\n },\n {\n 'class': Tangent1D,\n 'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.125 * u.Hz, 'phase': 0.25},\n 'evaluation': [(1 * u.s, -3 * u.km / u.s)],\n 'bounding_box': [-4, 0] / u.Hz\n },\n {\n 'class': ArcSine1D,\n 'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},\n 'evaluation': [(0 * u.km / u.s, -2 * u.s)],\n 'bounding_box': [-3, 3] * u.km / u.s\n },\n {\n 'class': ArcCosine1D,\n 'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},\n 'evaluation': [(0 * u.km / u.s, -1 * u.s)],\n 'bounding_box': [-3, 3] * u.km / u.s\n },\n {\n 'class': ArcTangent1D,\n 'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.125 * u.Hz, 'phase': 0.25},\n 'evaluation': [(0 * u.km / u.s, -2 * u.s)],\n 'bounding_box': False\n },\n {\n 'class': Linear1D,\n 
'parameters': {'slope': 3 * u.km / u.s, 'intercept': 5000 * u.m},\n 'evaluation': [(6000 * u.ms, 23 * u.km)],\n 'bounding_box': False\n },\n {\n 'class': Lorentz1D,\n 'parameters': {'amplitude': 2 * u.Jy, 'x_0': 505 * u.nm, 'fwhm': 100 * u.AA},\n 'evaluation': [(0.51 * u.micron, 1 * u.Jy)],\n 'bounding_box': [255, 755] * u.nm\n },\n {\n 'class': Voigt1D,\n 'parameters': {'amplitude_L': 2 * u.Jy, 'x_0': 505 * u.nm,\n 'fwhm_L': 100 * u.AA, 'fwhm_G': 50 * u.AA},\n 'evaluation': [(0.51 * u.micron, 1.0621795524 * u.Jy)],\n 'bounding_box': False\n },\n {\n 'class': Const1D,\n 'parameters': {'amplitude': 3 * u.Jy},\n 'evaluation': [(0.6 * u.micron, 3 * u.Jy)],\n 'bounding_box': False\n },\n {\n 'class': Box1D,\n 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'width': 1 * u.um},\n 'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],\n 'bounding_box': [3.9, 4.9] * u.um\n },\n {\n 'class': Trapezoid1D,\n 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um,\n 'width': 1 * u.um, 'slope': 5 * u.Jy / u.um},\n 'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],\n 'bounding_box': [3.3, 5.5] * u.um\n },\n {\n 'class': RickerWavelet1D,\n 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'sigma': 1e-3 * u.mm},\n 'evaluation': [(1000 * u.nm, -0.09785050 * u.Jy)],\n 'bounding_box': [-5.6, 14.4] * u.um\n },\n {\n 'class': Moffat1D,\n 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'gamma': 1e-3 * u.mm, 'alpha': 1},\n 'evaluation': [(1000 * u.nm, 0.238853503 * u.Jy)],\n 'bounding_box': False\n },\n {\n 'class': KingProjectedAnalytic1D,\n 'parameters': {'amplitude': 1. * u.Msun/u.pc**2, 'r_core': 1. * u.pc, 'r_tide': 2. * u.pc},\n 'evaluation': [(0.5 * u.pc, 0.2 * u.Msun/u.pc**2)],\n 'bounding_box': [0. * u.pc, 2. 
* u.pc]\n },\n {\n 'class': Logarithmic1D,\n 'parameters': {'amplitude': 5*u.m, 'tau': 2 * u.m},\n 'evaluation': [(4 * u.m, 3.4657359027997265 * u.m)],\n 'bounding_box': False\n },\n {\n 'class': Exponential1D,\n 'parameters': {'amplitude': 5*u.m, 'tau': 2 * u.m},\n 'evaluation': [(4 * u.m, 36.945280494653254 * u.m)],\n 'bounding_box': False\n }\n]\n\nSCALE_MODELS = [\n {\n 'class': Scale,\n 'parameters': {'factor': 2*u.m},\n 'evaluation': [(1*u.m, 2*u.m)],\n 'bounding_box': False\n },\n {\n 'class': Multiply,\n 'parameters': {'factor': 2*u.m},\n 'evaluation': [(1 * u.m/u.m, 2*u.m)],\n 'bounding_box': False\n },\n]\n\nPHYS_MODELS_1D = [\n {\n 'class': Plummer1D,\n 'parameters': {'mass': 3 * u.kg, 'r_plum': 0.5 * u.m},\n 'evaluation': [(1 * u.m, 0.10249381 * u.kg / (u.m ** 3))],\n 'bounding_box': False\n },\n {\n 'class': Drude1D,\n 'parameters': {'amplitude': 1.0 * u.m, 'x_0': 2175. * u.AA, 'fwhm': 400. * u.AA},\n 'evaluation': [(2000 * u.AA, 0.5452317018423869 * u.m)],\n 'bounding_box': [-17825, 22175] * u.AA\n },\n]\n\nFUNC_MODELS_2D = [\n {\n 'class': Gaussian2D,\n 'parameters': {'amplitude': 3 * u.Jy, 'x_mean': 2 * u.m, 'y_mean': 1 * u.m,\n 'x_stddev': 3 * u.m, 'y_stddev': 2 * u.m, 'theta': 45 * u.deg},\n 'evaluation': [(412.1320343 * u.cm, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))],\n 'bounding_box': [[-13.02230366, 15.02230366],\n [-12.02230366, 16.02230366]] * u.m\n },\n {\n 'class': Const2D,\n 'parameters': {'amplitude': 3 * u.Jy},\n 'evaluation': [(0.6 * u.micron, 0.2 * u.m, 3 * u.Jy)],\n 'bounding_box': False\n },\n {\n 'class': Disk2D,\n 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,\n 'R_0': 300 * u.cm},\n 'evaluation': [(5.8 * u.m, 201 * u.cm, 3 * u.Jy)],\n 'bounding_box': [[-1, 5], [0, 6]] * u.m\n },\n {\n 'class': TrapezoidDisk2D,\n 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 1 * u.m, 'y_0': 2 * u.m,\n 'R_0': 100 * u.cm, 'slope': 1 * u.Jy / u.m},\n 'evaluation': [(3.5 * u.m, 2 * u.m, 1.5 * u.Jy)],\n 'bounding_box': [[-2, 6], 
[-3, 5]] * u.m\n },\n {\n 'class': Ellipse2D,\n 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,\n 'a': 300 * u.cm, 'b': 200 * u.cm, 'theta': 45 * u.deg},\n 'evaluation': [(4 * u.m, 300 * u.cm, 3 * u.Jy)],\n 'bounding_box': [[-0.5495097567963922, 4.549509756796392],\n [0.4504902432036073, 5.549509756796393]] * u.m\n },\n {\n 'class': Ring2D,\n 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,\n 'r_in': 2 * u.cm, 'r_out': 2.1 * u.cm},\n 'evaluation': [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.Jy)],\n 'bounding_box': [[1.979, 2.021], [2.979, 3.021]] * u.m\n },\n {\n 'class': Box2D,\n 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.s,\n 'x_width': 4 * u.cm, 'y_width': 3 * u.s},\n 'evaluation': [(301 * u.cm, 3 * u.s, 3 * u.Jy)],\n 'bounding_box': [[0.5 * u.s, 3.5 * u.s], [2.98 * u.m, 3.02 * u.m]]\n },\n {\n 'class': RickerWavelet2D,\n 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,\n 'sigma': 1 * u.m},\n 'evaluation': [(4 * u.m, 2.5 * u.m, 0.602169107 * u.Jy)],\n 'bounding_box': False\n },\n {\n 'class': AiryDisk2D,\n 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,\n 'radius': 1 * u.m},\n 'evaluation': [(4 * u.m, 2.1 * u.m, 4.76998480e-05 * u.Jy)],\n 'bounding_box': False\n },\n {\n 'class': Moffat2D,\n 'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'y_0': 3.5 * u.um,\n 'gamma': 1e-3 * u.mm, 'alpha': 1},\n 'evaluation': [(1000 * u.nm, 2 * u.um, 0.202565833 * u.Jy)],\n 'bounding_box': False\n },\n {\n 'class': Sersic2D,\n 'parameters': {'amplitude': 3 * u.MJy / u.sr, 'x_0': 1 * u.arcsec,\n 'y_0': 2 * u.arcsec, 'r_eff': 2 * u.arcsec, 'n': 4,\n 'ellip': 0, 'theta': 0},\n 'evaluation': [(3 * u.arcsec, 2.5 * u.arcsec, 2.829990489 * u.MJy/u.sr)],\n 'bounding_box': False\n },\n {\n 'class': Planar2D,\n 'parameters': {'slope_x': 2*u.m, 'slope_y': 3*u.m, 'intercept': 4*u.m},\n 'evaluation': [(5*u.m/u.m, 6*u.m/u.m, 32*u.m)],\n 'bounding_box': False\n 
    },
]

# Power-law models evaluated with mass-valued amplitudes and length-valued
# pivot/break points; expected outputs were computed by hand from each
# model's analytic form.
POWERLAW_MODELS = [
    {
        'class': PowerLaw1D,
        'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1},
        'evaluation': [(1 * u.m, 500 * u.g)],
        'bounding_box': False
    },
    {
        'class': BrokenPowerLaw1D,
        'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm, 'alpha_1': 1, 'alpha_2': -1},
        'evaluation': [(1 * u.m, 50 * u.kg), (1 * u.cm, 50 * u.kg)],
        'bounding_box': False
    },
    {
        'class': SmoothlyBrokenPowerLaw1D,
        'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm,
                       'alpha_1': 1, 'alpha_2': -1, 'delta': 1},
        'evaluation': [(1 * u.cm, 15.125 * u.kg), (1 * u.m, 15.125 * u.kg)],
        'bounding_box': False
    },
    {
        'class': ExponentialCutoffPowerLaw1D,
        'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'x_cutoff': 1 * u.m},
        'evaluation': [(1 * u.um, 499999.5 * u.kg), (10 * u.m, 50 * np.exp(-10) * u.g)],
        'bounding_box': False
    },
    {
        'class': LogParabola1D,
        'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'beta': 2},
        'evaluation': [(1 * u.cm, 5 * 0.1 ** (-1 - 2 * np.log(0.1)) * u.kg)],
        'bounding_box': False
    },
]

# Polynomial models: three Polynomial1D cases (dimensionless coefficients,
# unit-carrying coefficients with a dimensional input, and unit-carrying
# coefficients with a dimensionless input) plus the analogous Polynomial2D
# cases.
POLY_MODELS = [
    {
        'class': Polynomial1D,
        'parameters': {'degree': 2, 'c0': 3 * u.one, 'c1': 2 / u.m, 'c2': 3 / u.m**2},
        'evaluation': [(3 * u.m, 36 * u.one)],
        'bounding_box': False
    },
    {
        'class': Polynomial1D,
        'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg / u.m, 'c2': 3 * u.kg / u.m**2},
        'evaluation': [(3 * u.m, 36 * u.kg)],
        'bounding_box': False
    },
    {
        'class': Polynomial1D,
        'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg, 'c2': 3 * u.kg},
        'evaluation': [(3 * u.one, 36 * u.kg)],
        'bounding_box': False
    },
    {
        'class': Polynomial2D,
        'parameters': {'degree': 2, 'c0_0': 3 * u.one, 'c1_0': 2 / u.m, 'c2_0': 3 / u.m**2,
                       'c0_1': 3 / u.s, 'c0_2': -2 / u.s**2, 'c1_1': 5 / u.m / u.s},
        'evaluation': [(3 * u.m, 2 * u.s, 64 * u.one)],
        'bounding_box': False
    },
    {
        'class': Polynomial2D,
        'parameters': {'degree': 2, 'c0_0': 3 * u.kg,
                       'c1_0': 2 * u.kg / u.m, 'c2_0': 3 * u.kg / u.m**2,
                       'c0_1': 3 * u.kg / u.s, 'c0_2': -2 * u.kg / u.s**2,
                       'c1_1': 5 * u.kg / u.m / u.s},
        'evaluation': [(3 * u.m, 2 * u.s, 64 * u.kg)],
        'bounding_box': False
    },
    {
        'class': Polynomial2D,
        'parameters': {'degree': 2, 'c0_0': 3 * u.kg, 'c1_0': 2 * u.kg, 'c2_0': 3 * u.kg,
                       'c0_1': 3 * u.kg, 'c0_2': -2 * u.kg, 'c1_1': 5 * u.kg},
        'evaluation': [(3 * u.one, 2 * u.one, 64 * u.kg)],
        'bounding_box': False
    },
]


# Full collection of test cases fed to the parametrized tests below.
MODELS = (FUNC_MODELS_1D + SCALE_MODELS + FUNC_MODELS_2D + POWERLAW_MODELS +
          PHYS_MODELS_1D + POLY_MODELS)

# Models whose evaluation requires scipy; tests involving them are skipped
# when scipy is not installed (see the HAS_SCIPY guards below).
SCIPY_MODELS = {Sersic1D, Sersic2D, AiryDisk2D}

# These models will fail the fitting test, because the built-in fitting data
# will produce non-finite values
NON_FINITE_LevMar_MODELS = [
    Sersic1D,
    ArcSine1D,
    ArcCosine1D,
    PowerLaw1D,
    ExponentialCutoffPowerLaw1D,
    BrokenPowerLaw1D,
    LogParabola1D
]

# These models will fail the TRFLSQFitter fitting test due to non-finite
# values produced with the built-in fitting data
NON_FINITE_TRF_MODELS = [
    ArcSine1D,
    ArcCosine1D,
    Sersic1D,
    Sersic2D,
    PowerLaw1D,
    ExponentialCutoffPowerLaw1D,
    BrokenPowerLaw1D
]

# These models will fail the LMLSQFitter fitting test due to non-finite
# values produced with the built-in fitting data
NON_FINITE_LM_MODELS = [
    Sersic1D,
    ArcSine1D,
    ArcCosine1D,
    PowerLaw1D,
    LogParabola1D,
    ExponentialCutoffPowerLaw1D,
    BrokenPowerLaw1D
]

# These models will fail the DogBoxLSQFitter fitting test due to non-finite
# values produced with the built-in fitting data
NON_FINITE_DogBox_MODELS = [
    Sersic1D,
    Sersic2D,
    ArcSine1D,
    ArcCosine1D,
    SmoothlyBrokenPowerLaw1D,
    ExponentialCutoffPowerLaw1D,
    LogParabola1D
]


@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_without_units(model):
    """A model stripped of units via ``without_units_for_data`` evaluates the
    raw values to the raw value of the expected quantity."""
    if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
        pytest.skip()
    m = model['class'](**model['parameters'])
    for args in model['evaluation']:
        if len(args) == 2:
            kwargs = dict(zip(('x', 'y'), args))
        else:
            kwargs = dict(zip(('x', 'y', 'z'), args))
            # without_units_for_data needs both inputs in the same unit, so
            # convert x into y's unit when the two are convertible.
            if kwargs['x'].unit.is_equivalent(kwargs['y'].unit):
                kwargs['x'] = kwargs['x'].to(kwargs['y'].unit)
        mnu = m.without_units_for_data(**kwargs)
        # Evaluate on the bare .value inputs; the last value is the expected output.
        args = [x.value for x in kwargs.values()]
        assert_quantity_allclose(mnu(*args[:-1]), args[-1])


@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units(model):
    """Evaluating a model with Quantity inputs yields the expected Quantity."""
    if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
        pytest.skip()
    m = model['class'](**model['parameters'])
    for args in model['evaluation']:
        assert_quantity_allclose(m(*args[:-1]), args[-1])


@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_x_array(model):
    """Quantity *array* inputs evaluate element-wise to the expected values."""

    if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
        pytest.skip()

    m = model['class'](**model['parameters'])

    for args in model['evaluation']:
        if len(args) == 2:
            x, y = args
            # Duplicate the scalar input into a length-2 Quantity array.
            x_arr = u.Quantity([x, x])
            result = m(x_arr)
            assert_quantity_allclose(result, u.Quantity([y, y]))
        else:
            x, y, z = args
            x_arr = u.Quantity([x, x])
            y_arr = u.Quantity([y, y])
            result = m(x_arr, y_arr)
            assert_quantity_allclose(result, u.Quantity([z, z]))


@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_param_array(model):
    """A two-model set (``n_models=2``) with duplicated Quantity parameters
    evaluates array inputs to the expected values; also checks that Drude1D
    rejects ``x_0 == 0``."""

    if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
        pytest.skip()

    # Duplicate each parameter value so the model set has one value per model;
    # 'degree' (and None placeholders) are model-wide settings, not parameters.
    params = {}
    for key, value in model['parameters'].items():
        if value is None or key == 'degree':
            params[key] = value
        else:
            params[key] = np.repeat(value, 2)

    params['n_models'] = 2

    m = model['class'](**params)

    for args in model['evaluation']:
        if len(args) == 2:
            x, y = args
            x_arr = u.Quantity([x, x])
            result = m(x_arr)
            assert_quantity_allclose(result, u.Quantity([y, y]))
        else:
            x, y, z = args
            x_arr = u.Quantity([x, x])
            y_arr = u.Quantity([y, y])
            result = m(x_arr, y_arr)
            assert_quantity_allclose(result, u.Quantity([z, z]))

    # Drude1D forbids a zero x_0; constructing with it must raise.
    if model['class'] == Drude1D:
        params['x_0'][-1] = 0 * u.AA
        with pytest.raises(InputParameterError) as err:
            model['class'](**params)
        assert str(err.value) == '0 is not an allowed value for x_0'


@pytest.mark.parametrize('model', MODELS)
def test_models_bounding_box(model):
    """``bounding_box`` works (or raises NotImplementedError) for models with
    Quantity parameters."""

    # In some cases, having units in parameters caused bounding_box to break,
    # so this is to ensure that it works correctly.

    if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
        pytest.skip()

    m = model['class'](**model['parameters'])

    # In the following we need to explicitly test that the value is False
    # since Quantities no longer evaluate as True
    if model['bounding_box'] is False:
        # Check that NotImplementedError is raised, so that if bounding_box is
        # implemented we remember to set bounding_box=True in the list of models
        # above
        with pytest.raises(NotImplementedError):
            m.bounding_box
    else:
        # A bounding box may have inhomogeneous units so we need to check the
        # values one by one.
        for i in range(len(model['bounding_box'])):
            bbox = m.bounding_box
            if isinstance(bbox, ModelBoundingBox):
                bbox = bbox.bounding_box()
            assert_quantity_allclose(bbox[i], model['bounding_box'][i])


@pytest.mark.parametrize('model', MODELS)
def test_compound_model_input_units_equivalencies_defaults(model):
    """``input_units_equivalencies`` defaults to None for every compound-model
    combination (+, -, &, |) and after ``fix_inputs``."""
    m = model['class'](**model['parameters'])

    assert m.input_units_equivalencies is None

    compound_model = m + m
    assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
    fixed_input_model = fix_inputs(compound_model, {'x': 1})

    assert fixed_input_model.input_units_equivalencies is None

    compound_model = m - m
    assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
    fixed_input_model = fix_inputs(compound_model, {'x': 1})

    assert fixed_input_model.input_units_equivalencies is None

    compound_model = m & m
    assert compound_model.inputs_map()['x1'][0].input_units_equivalencies is None
    fixed_input_model = fix_inputs(compound_model, {'x0': 1})
    assert fixed_input_model.inputs_map()['x1'][0].input_units_equivalencies is None

    assert fixed_input_model.input_units_equivalencies is None

    # Composition (|) only makes sense when outputs can feed inputs.
    if m.n_outputs == m.n_inputs:
        compound_model = m | m
        assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
        fixed_input_model = fix_inputs(compound_model, {'x': 1})

        assert fixed_input_model.input_units_equivalencies is None


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
@pytest.mark.parametrize('model', MODELS)
@pytest.mark.parametrize('fitter', fitters)
def test_models_fitting(model, fitter):
    """Each fitter can fit each model with Quantity parameters, and the fitted
    parameters come back with units equivalent to the originals."""
    fitter = fitter()

    # Skip fitter/model pairs known to produce non-finite values with the
    # synthetic data built below (see the NON_FINITE_* lists).
    if (
        (isinstance(fitter, LevMarLSQFitter) and model['class'] in NON_FINITE_LevMar_MODELS) or
        (isinstance(fitter, TRFLSQFitter) and model['class'] in NON_FINITE_TRF_MODELS) or
        (isinstance(fitter, LMLSQFitter) and model['class'] in NON_FINITE_LM_MODELS) or
        (isinstance(fitter, DogBoxLSQFitter) and model['class'] in NON_FINITE_DogBox_MODELS)
    ):
        return

    m = model['class'](**model['parameters'])
    # Build synthetic Gaussian-shaped data carrying the units of the model's
    # example evaluation (1D or 2D depending on the model).
    if len(model['evaluation'][0]) == 2:
        x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
        y = np.exp(-x.value ** 2) * model['evaluation'][0][1].unit
        args = [x, y]
    else:
        x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
        y = np.linspace(1, 3, 100) * model['evaluation'][0][1].unit
        z = np.exp(-x.value**2 - y.value**2) * model['evaluation'][0][2].unit
        args = [x, y, z]

    # Test that the model fits even if it has units on parameters
    m_new = fitter(m, *args)

    # Check that units have been put back correctly
    for param_name in m.param_names:
        par_bef = getattr(m, param_name)
        par_aft = getattr(m_new, param_name)
        if par_bef.unit is None:
            # If the parameter used to not have a unit then had a radian unit
            # for example, then we should allow that
            assert par_aft.unit is None or par_aft.unit is u.rad
        else:
            assert par_aft.unit.is_equivalent(par_bef.unit)


# 2D models evaluated with deliberately mismatched x/y input units (e.g. cm
# vs K); used to check that a UnitsError is raised with a clear message.
unit_mismatch_models = [
    {'class': Gaussian2D,
     'parameters': {'amplitude': 3 * u.Jy, 'x_mean': 2 * u.m, 'y_mean': 1 * u.m,
                    'x_stddev': 3 * u.m, 'y_stddev': 2 * u.m, 'theta': 45 * u.deg},
     'evaluation': [(412.1320343 * u.cm, 3.121320343 * u.K, 3 * u.Jy * np.exp(-0.5)),
                    (412.1320343 * u.K, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))],
     'bounding_box': [[-14.18257445, 16.18257445], [-10.75693665, 14.75693665]] * u.m},
    {'class': Ellipse2D,
     'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
                    'a': 300 * u.cm, 'b': 200 * u.cm, 'theta': 45 * u.deg},
     'evaluation': [(4 * u.m, 300 * u.K, 3 * u.Jy),
                    (4 * u.K, 300 * u.cm, 3 * u.Jy)],
     'bounding_box': [[-0.76046808, 4.76046808], [0.68055697, 5.31944302]] * u.m},
    {'class': Disk2D,
     'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
                    'R_0': 300 * u.cm},
     'evaluation': [(5.8 * u.m, 201 * u.K, 3 * u.Jy),
                    (5.8 * u.K, 201 * u.cm, 3 * u.Jy)],
     'bounding_box': [[-1, 5], [0, 6]] * u.m},
    {'class': Ring2D,
     'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
                    'r_in': 2 * u.cm, 'r_out': 2.1 * u.cm},
     'evaluation': [(302.05 * u.cm, 2 * u.K + 10 * u.K, 3 * u.Jy),
                    (302.05 * u.K, 2 * u.m + 10 * u.um, 3 * u.Jy)],
     'bounding_box': [[1.979, 2.021], [2.979, 3.021]] * u.m},
    {'class': TrapezoidDisk2D,
     'parameters': {'amplitude': 3 * u.Jy, 'x_0': 1 * u.m, 'y_0': 2 * u.m,
                    'R_0': 100 * u.cm, 'slope': 1 * u.Jy / u.m},
     'evaluation': [(3.5 * u.m, 2 * u.K, 1.5 * u.Jy),
                    (3.5 * u.K, 2 * u.m, 1.5 * u.Jy)],
     'bounding_box': [[-2, 6], [-3, 5]] * u.m},
    {'class': RickerWavelet2D,
     'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
                    'sigma': 1 * u.m},
     'evaluation': [(4 * u.m, 2.5 * u.K, 0.602169107 * u.Jy),
                    (4 * u.K, 2.5 * u.m, 0.602169107 * u.Jy)],
     'bounding_box': False},
    {'class': AiryDisk2D,
     'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
                    'radius': 1 * u.m},
     'evaluation': [(4 * u.m, 2.1 * u.K, 4.76998480e-05 * u.Jy),
                    (4 * u.K, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
     'bounding_box': False},
    {'class': Moffat2D,
     'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'y_0': 3.5 * u.um,
                    'gamma': 1e-3 * u.mm, 'alpha': 1},
     'evaluation': [(1000 * u.nm, 2 * u.K, 0.202565833 * u.Jy),
                    (1000 * u.K, 2 * u.um, 0.202565833 * u.Jy)],
     'bounding_box': False},
    {'class': Sersic2D,
     'parameters': {'amplitude': 3 * u.MJy / u.sr, 'x_0': 1 * u.arcsec,
                    'y_0': 2 * u.arcsec, 'r_eff': 2 * u.arcsec, 'n': 4,
                    'ellip': 0, 'theta': 0},
     'evaluation': [(3 * u.arcsec, 2.5 * u.m, 2.829990489 * u.MJy/u.sr),
                    (3 * u.m, 2.5 * u.arcsec, 2.829990489 * u.MJy/u.sr)],
     'bounding_box': False},
]


@pytest.mark.parametrize('model', unit_mismatch_models)
def test_input_unit_mismatch_error(model):
    """``without_units_for_data`` raises UnitsError with an explicit message
    when the x and y inputs carry incompatible units."""
    if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
        pytest.skip()

    message = "Units of 'x' and 'y' inputs should match"

    m = model['class'](**model['parameters'])

    for args in model['evaluation']:
        if len(args) == 2:
            kwargs = dict(zip(('x', 'y'), args))
        else:
            kwargs = dict(zip(('x', 'y', 'z'), args))
            # Align convertible units up front; the remaining mismatch (e.g.
            # cm vs K) is what must trigger the error.
            if kwargs['x'].unit.is_equivalent(kwargs['y'].unit):
                kwargs['x'] = kwargs['x'].to(kwargs['y'].unit)
        with pytest.raises(u.UnitsError) as err:
            m.without_units_for_data(**kwargs)
        assert str(err.value) == message
['get_xml_iterator', 'get_xml_encoding', 'xml_readlines']\n\n\n@contextlib.contextmanager\ndef _convert_to_fd_or_read_function(fd):\n \"\"\"\n Returns a function suitable for streaming input, or a file object.\n\n This function is only useful if passing off to C code where:\n\n - If it's a real file object, we want to use it as a real\n C file object to avoid the Python overhead.\n\n - If it's not a real file object, it's much handier to just\n have a Python function to call.\n\n This is somewhat quirky behavior, of course, which is why it is\n private. For a more useful version of similar behavior, see\n `astropy.utils.misc.get_readable_fileobj`.\n\n Parameters\n ----------\n fd : object\n May be:\n\n - a file object. If the file is uncompressed, this raw\n file object is returned verbatim. Otherwise, the read\n method is returned.\n\n - a function that reads from a stream, in which case it is\n returned verbatim.\n\n - a file path, in which case it is opened. Again, like a\n file object, if it's uncompressed, a raw file object is\n returned, otherwise its read method.\n\n - an object with a :meth:`read` method, in which case that\n method is returned.\n\n Returns\n -------\n fd : context-dependent\n See above.\n \"\"\"\n if callable(fd):\n yield fd\n return\n\n with data.get_readable_fileobj(fd, encoding='binary') as new_fd:\n if sys.platform.startswith('win'):\n yield new_fd.read\n else:\n if isinstance(new_fd, io.FileIO):\n yield new_fd\n else:\n yield new_fd.read\n\n\ndef _fast_iterparse(fd, buffersize=2 ** 10):\n from xml.parsers import expat\n\n if not callable(fd):\n read = fd.read\n else:\n read = fd\n\n queue = []\n text = []\n\n def start(name, attr):\n queue.append((True, name, attr,\n (parser.CurrentLineNumber, parser.CurrentColumnNumber)))\n del text[:]\n\n def end(name):\n queue.append((False, name, ''.join(text).strip(),\n (parser.CurrentLineNumber, parser.CurrentColumnNumber)))\n\n parser = expat.ParserCreate()\n parser.specified_attributes = 
True\n parser.StartElementHandler = start\n parser.EndElementHandler = end\n parser.CharacterDataHandler = text.append\n Parse = parser.Parse\n\n data = read(buffersize)\n while data:\n Parse(data, False)\n yield from queue\n del queue[:]\n data = read(buffersize)\n\n Parse('', True)\n yield from queue\n\n\n# Try to import the C version of the iterparser, otherwise fall back\n# to the Python implementation above.\n_slow_iterparse = _fast_iterparse\ntry:\n from . import _iterparser\n _fast_iterparse = _iterparser.IterParser\nexcept ImportError:\n pass\n\n\n@contextlib.contextmanager\ndef get_xml_iterator(source, _debug_python_based_parser=False):\n \"\"\"\n Returns an iterator over the elements of an XML file.\n\n The iterator doesn't ever build a tree, so it is much more memory\n and time efficient than the alternative in ``cElementTree``.\n\n Parameters\n ----------\n source : path-like, readable file-like, or callable\n Handle that contains the data or function that reads it.\n If a function or callable object, it must directly read from a stream.\n Non-callable objects must define a ``read`` method.\n\n Returns\n -------\n parts : iterator\n\n The iterator returns 4-tuples (*start*, *tag*, *data*, *pos*):\n\n - *start*: when `True` is a start element event, otherwise\n an end element event.\n\n - *tag*: The name of the element\n\n - *data*: Depends on the value of *event*:\n\n - if *start* == `True`, data is a dictionary of\n attributes\n\n - if *start* == `False`, data is a string containing\n the text content of the element\n\n - *pos*: Tuple (*line*, *col*) indicating the source of the\n event.\n \"\"\"\n with _convert_to_fd_or_read_function(source) as fd:\n if _debug_python_based_parser:\n context = _slow_iterparse(fd)\n else:\n context = _fast_iterparse(fd)\n yield iter(context)\n\n\ndef get_xml_encoding(source):\n \"\"\"\n Determine the encoding of an XML file by reading its header.\n\n Parameters\n ----------\n source : path-like, readable file-like, or 
callable\n Handle that contains the data or function that reads it.\n If a function or callable object, it must directly read from a stream.\n Non-callable objects must define a ``read`` method.\n\n Returns\n -------\n encoding : str\n \"\"\"\n with get_xml_iterator(source) as iterator:\n start, tag, data, pos = next(iterator)\n if not start or tag != 'xml':\n raise OSError('Invalid XML file')\n\n # The XML spec says that no encoding === utf-8\n return data.get('encoding') or 'utf-8'\n\n\ndef xml_readlines(source):\n \"\"\"\n Get the lines from a given XML file. Correctly determines the\n encoding and always returns unicode.\n\n Parameters\n ----------\n source : path-like, readable file-like, or callable\n Handle that contains the data or function that reads it.\n If a function or callable object, it must directly read from a stream.\n Non-callable objects must define a ``read`` method.\n\n Returns\n -------\n lines : list of unicode\n \"\"\"\n encoding = get_xml_encoding(source)\n\n with data.get_readable_fileobj(source, encoding=encoding) as input:\n input.seek(0)\n xml_lines = input.readlines()\n\n return xml_lines\n"}}},{"rowIdx":1367,"cells":{"hash":{"kind":"string","value":"5a9f3e7b2afe92c114c224c7aeb929fe04a77d42110d2657678051a25dfdde4a"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport json\nimport locale\nimport os\nimport urllib.error\nfrom datetime import datetime\n\nimport pytest\nimport numpy as np\n\nfrom astropy.utils import data, misc\nfrom astropy.io import fits\n\n\ndef test_isiterable():\n assert misc.isiterable(2) is False\n assert misc.isiterable([2]) is True\n assert misc.isiterable([1, 2, 3]) is True\n assert misc.isiterable(np.array(2)) is False\n assert misc.isiterable(np.array([1, 2, 3])) is True\n\n\ndef test_signal_number_to_name_no_failure():\n # Regression test for #5340: ensure signal_number_to_name throws no\n # AttributeError (it used \".iteritems()\" which was removed in 
Python3).\n misc.signal_number_to_name(0)\n\n\n@pytest.mark.remote_data\ndef test_api_lookup():\n try:\n strurl = misc.find_api_page('astropy.utils.misc', 'dev', False,\n timeout=5)\n objurl = misc.find_api_page(misc, 'dev', False, timeout=5)\n except urllib.error.URLError:\n if os.environ.get('CI', False):\n pytest.xfail('Timed out in CI')\n else:\n raise\n\n assert strurl == objurl\n assert strurl == 'http://devdocs.astropy.org/utils/index.html#module-astropy.utils.misc' # noqa\n\n # Try a non-dev version\n objurl = misc.find_api_page(misc, 'v3.2.1', False, timeout=3)\n assert objurl == 'https://docs.astropy.org/en/v3.2.1/utils/index.html#module-astropy.utils.misc' # noqa\n\n\ndef test_skip_hidden():\n path = data.get_pkg_data_path('data')\n for root, dirs, files in os.walk(path):\n assert '.hidden_file.txt' in files\n assert 'local.dat' in files\n # break after the first level since the data dir contains some other\n # subdirectories that don't have these files\n break\n\n for root, dirs, files in misc.walk_skip_hidden(path):\n assert '.hidden_file.txt' not in files\n assert 'local.dat' in files\n break\n\n\ndef test_JsonCustomEncoder():\n from astropy import units as u\n assert json.dumps(np.arange(3), cls=misc.JsonCustomEncoder) == '[0, 1, 2]'\n assert json.dumps(1+2j, cls=misc.JsonCustomEncoder) == '[1.0, 2.0]'\n assert json.dumps({1, 2, 1}, cls=misc.JsonCustomEncoder) == '[1, 2]'\n assert json.dumps(b'hello world \\xc3\\x85',\n cls=misc.JsonCustomEncoder) == '\"hello world \\\\u00c5\"'\n assert json.dumps({1: 2},\n cls=misc.JsonCustomEncoder) == '{\"1\": 2}' # default\n assert json.dumps({1: u.m}, cls=misc.JsonCustomEncoder) == '{\"1\": \"m\"}'\n # Quantities\n tmp = json.dumps({'a': 5*u.cm}, cls=misc.JsonCustomEncoder)\n newd = json.loads(tmp)\n tmpd = {\"a\": {\"unit\": \"cm\", \"value\": 5.0}}\n assert newd == tmpd\n tmp2 = json.dumps({'a': np.arange(2)*u.cm}, cls=misc.JsonCustomEncoder)\n newd = json.loads(tmp2)\n tmpd = {\"a\": {\"unit\": \"cm\", 
\"value\": [0., 1.]}}\n assert newd == tmpd\n tmp3 = json.dumps({'a': np.arange(2)*u.erg/u.s}, cls=misc.JsonCustomEncoder)\n newd = json.loads(tmp3)\n tmpd = {\"a\": {\"unit\": \"erg / s\", \"value\": [0., 1.]}}\n assert newd == tmpd\n\n\ndef test_JsonCustomEncoder_FITS_rec_from_files():\n with fits.open(fits.util.get_testdata_filepath('variable_length_table.fits')) as hdul:\n assert json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder) == \\\n \"[[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]\"\n\n with fits.open(fits.util.get_testdata_filepath('btable.fits')) as hdul:\n assert json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder) == \\\n '[[1, \"Sirius\", -1.4500000476837158, \"A1V\"], ' \\\n '[2, \"Canopus\", -0.7300000190734863, \"F0Ib\"], ' \\\n '[3, \"Rigil Kent\", -0.10000000149011612, \"G2V\"]]'\n\n with fits.open(fits.util.get_testdata_filepath('table.fits')) as hdul:\n assert json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder) == \\\n '[[\"NGC1001\", 11.100000381469727], ' \\\n '[\"NGC1002\", 12.300000190734863], ' \\\n '[\"NGC1003\", 15.199999809265137]]'\n\n\ndef test_set_locale():\n # First, test if the required locales are available\n current = locale.setlocale(locale.LC_ALL)\n try:\n locale.setlocale(locale.LC_ALL, 'en_US.utf8')\n locale.setlocale(locale.LC_ALL, 'fr_FR.utf8')\n except locale.Error as e:\n pytest.skip(f'Locale error: {e}')\n finally:\n locale.setlocale(locale.LC_ALL, current)\n\n date = datetime(2000, 10, 1, 0, 0, 0)\n day_mon = date.strftime('%a, %b')\n\n with misc._set_locale('en_US.utf8'):\n assert date.strftime('%a, %b') == 'Sun, Oct'\n\n with misc._set_locale('fr_FR.utf8'):\n assert date.strftime('%a, %b') == 'dim., oct.'\n\n # Back to original\n assert date.strftime('%a, %b') == day_mon\n\n with misc._set_locale(current):\n assert date.strftime('%a, %b') == day_mon\n\n\ndef test_dtype_bytes_or_chars():\n assert misc.dtype_bytes_or_chars(np.dtype(np.float64)) == 8\n assert misc.dtype_bytes_or_chars(np.dtype(object)) is None\n 
assert misc.dtype_bytes_or_chars(np.dtype(np.int32)) == 4\n assert misc.dtype_bytes_or_chars(np.array(b'12345').dtype) == 5\n assert misc.dtype_bytes_or_chars(np.array('12345').dtype) == 5\n"}}},{"rowIdx":1368,"cells":{"hash":{"kind":"string","value":"22a4668e3a745dce40d8f03f0b8dc58aaecd240cc262c190acc3a53bfd9d4852"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\nimport numpy as np\n\nfrom astropy.utils.data_info import dtype_info_name\nfrom astropy.table import QTable\nfrom astropy.table.index import SlicedIndex\nfrom astropy.time import Time\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\n\n\nSTRING_TYPE_NAMES = {(True, 'S'): 'bytes',\n (True, 'U'): 'str'}\n\nDTYPE_TESTS = ((np.array(b'abcd').dtype, STRING_TYPE_NAMES[(True, 'S')] + '4'),\n (np.array('abcd').dtype, STRING_TYPE_NAMES[(True, 'U')] + '4'),\n ('S4', STRING_TYPE_NAMES[(True, 'S')] + '4'),\n ('U4', STRING_TYPE_NAMES[(True, 'U')] + '4'),\n (np.void, 'void'),\n (np.int32, 'int32'),\n (bool, 'bool'),\n (float, 'float64'),\n (' -1\n\n\ndef test_find_by_hash(valid_urls, temp_cache):\n testurl, contents = next(valid_urls)\n p = download_file(testurl, cache=True)\n hash = compute_hash(p)\n\n hashstr = \"hash/\" + hash\n\n fnout = get_pkg_data_filename(hashstr)\n assert os.path.isfile(fnout)\n clear_download_cache(fnout)\n assert not os.path.isfile(fnout)\n\n\n@pytest.mark.remote_data(source=\"astropy\")\ndef test_find_invalid():\n # this is of course not a real data file and not on any remote server, but\n # it should *try* to go to the remote server\n with pytest.raises(urllib.error.URLError):\n get_pkg_data_filename(\n \"kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli\"\n )\n\n\n@pytest.mark.parametrize(\"package\", [None, \"astropy\", \"numpy\"])\ndef test_get_invalid(package):\n \"\"\"Test can create a file path to an invalid file.\"\"\"\n path = get_pkg_data_path(\"kjfrhgjkla\", \"hgiulrhgiu\", 
package=package)\n assert not os.path.isfile(path)\n assert not os.path.isdir(path)\n\n\n# Package data functions\n@pytest.mark.parametrize(\n (\"filename\"), [\"local.dat\", \"local.dat.gz\", \"local.dat.bz2\", \"local.dat.xz\"]\n)\ndef test_local_data_obj(filename):\n if ((not HAS_BZ2 and \"bz2\" in filename) or\n (not HAS_LZMA and \"xz\" in filename)):\n with pytest.raises(ValueError) as e:\n with get_pkg_data_fileobj(\n os.path.join(\"data\", filename), encoding=\"binary\"\n ) as f:\n f.readline()\n # assert f.read().rstrip() == b'CONTENT'\n assert \" format files are not supported\" in str(e.value)\n else:\n with get_pkg_data_fileobj(\n os.path.join(\"data\", filename), encoding=\"binary\"\n ) as f:\n f.readline()\n assert f.read().rstrip() == b\"CONTENT\"\n\n\n@pytest.fixture(params=[\"invalid.dat.bz2\", \"invalid.dat.gz\"])\ndef bad_compressed(request, tmpdir):\n # These contents have valid headers for their respective file formats, but\n # are otherwise malformed and invalid.\n bz_content = b\"BZhinvalid\"\n gz_content = b\"\\x1f\\x8b\\x08invalid\"\n\n datafile = tmpdir.join(request.param)\n filename = datafile.strpath\n\n if filename.endswith(\".bz2\"):\n contents = bz_content\n elif filename.endswith(\".gz\"):\n contents = gz_content\n else:\n contents = \"invalid\"\n\n datafile.write(contents, mode=\"wb\")\n\n return filename\n\n\ndef test_local_data_obj_invalid(bad_compressed):\n is_bz2 = bad_compressed.endswith(\".bz2\")\n is_xz = bad_compressed.endswith(\".xz\")\n\n # Note, since these invalid files are created on the fly in order to avoid\n # problems with detection by antivirus software\n # (see https://github.com/astropy/astropy/issues/6520), it is no longer\n # possible to use ``get_pkg_data_fileobj`` to read the files. Technically,\n # they're not local anymore: they just live in a temporary directory\n # created by pytest. 
However, we can still use get_readable_fileobj for the\n # test.\n if (not HAS_BZ2 and is_bz2) or (not HAS_LZMA and is_xz):\n with pytest.raises(ModuleNotFoundError,\n match=r'does not provide the [lb]z[2m]a? module\\.'):\n with get_readable_fileobj(bad_compressed, encoding=\"binary\") as f:\n f.read()\n else:\n with get_readable_fileobj(bad_compressed, encoding=\"binary\") as f:\n assert f.read().rstrip().endswith(b\"invalid\")\n\n\ndef test_local_data_name():\n assert os.path.isfile(TESTLOCAL) and TESTLOCAL.endswith(\"local.dat\")\n\n # TODO: if in the future, the root data/ directory is added in, the below\n # test should be uncommented and the README.rst should be replaced with\n # whatever file is there\n\n # get something in the astropy root\n # fnout2 = get_pkg_data_filename('../../data/README.rst')\n # assert os.path.isfile(fnout2) and fnout2.endswith('README.rst')\n\n\ndef test_data_name_third_party_package():\n \"\"\"Regression test for issue #1256\n\n Tests that `get_pkg_data_filename` works in a third-party package that\n doesn't make any relative imports from the module it's used from.\n\n Uses a test package under ``data/test_package``.\n \"\"\"\n\n # Get the actual data dir:\n data_dir = os.path.join(os.path.dirname(__file__), \"data\")\n\n sys.path.insert(0, data_dir)\n try:\n import test_package\n\n filename = test_package.get_data_filename()\n assert os.path.normcase(filename) == (\n os.path.normcase(os.path.join(data_dir, \"test_package\", \"data\", \"foo.txt\"))\n )\n finally:\n sys.path.pop(0)\n\n\ndef test_local_data_nonlocalfail():\n # this would go *outside* the astropy tree\n with pytest.raises(RuntimeError):\n get_pkg_data_filename(\"../../../data/README.rst\")\n\n\ndef test_compute_hash(tmpdir):\n\n rands = b\"1234567890abcdefghijklmnopqrstuvwxyz\"\n\n filename = tmpdir.join(\"tmp.dat\").strpath\n\n with open(filename, \"wb\") as ntf:\n ntf.write(rands)\n ntf.flush()\n\n chhash = compute_hash(filename)\n shash = 
hashlib.md5(rands).hexdigest()\n\n assert chhash == shash\n\n\ndef test_get_pkg_data_contents():\n\n with get_pkg_data_fileobj(\"data/local.dat\") as f:\n contents1 = f.read()\n\n contents2 = get_pkg_data_contents(\"data/local.dat\")\n\n assert contents1 == contents2\n\n\n@pytest.mark.remote_data(source=\"astropy\")\ndef test_data_noastropy_fallback(monkeypatch):\n \"\"\"\n Tests to make sure the default behavior when the cache directory can't\n be located is correct\n \"\"\"\n\n # better yet, set the configuration to make sure the temp files are deleted\n conf.delete_temporary_downloads_at_exit = True\n\n # make sure the config and cache directories are not searched\n monkeypatch.setenv(\"XDG_CONFIG_HOME\", \"foo\")\n monkeypatch.delenv(\"XDG_CONFIG_HOME\")\n monkeypatch.setenv(\"XDG_CACHE_HOME\", \"bar\")\n monkeypatch.delenv(\"XDG_CACHE_HOME\")\n\n monkeypatch.setattr(paths.set_temp_config, \"_temp_path\", None)\n monkeypatch.setattr(paths.set_temp_cache, \"_temp_path\", None)\n\n # make sure the _find_or_create_astropy_dir function fails as though the\n # astropy dir could not be accessed\n def osraiser(dirnm, linkto, pkgname=None):\n raise OSError()\n monkeypatch.setattr(paths, '_find_or_create_root_dir', osraiser)\n\n with pytest.raises(OSError):\n # make sure the config dir search fails\n paths.get_cache_dir(rootname='astropy')\n\n with pytest.warns(CacheMissingWarning) as warning_lines:\n fnout = download_file(TESTURL, cache=True)\n n_warns = len(warning_lines)\n\n partial_warn_msgs = ['remote data cache could not be accessed', 'temporary file']\n if n_warns == 4:\n partial_warn_msgs.extend(['socket', 'socket'])\n\n for wl in warning_lines:\n cur_w = str(wl).lower()\n for i, partial_msg in enumerate(partial_warn_msgs):\n if partial_msg in cur_w:\n del partial_warn_msgs[i]\n break\n assert len(partial_warn_msgs) == 0, f'Got some unexpected warnings: {partial_warn_msgs}'\n\n assert n_warns in (2, 4), f'Expected 2 or 4 warnings, got {n_warns}'\n\n assert 
os.path.isfile(fnout)\n\n # clearing the cache should be a no-up that doesn't affect fnout\n with pytest.warns(CacheMissingWarning,\n match=r\".*Not clearing data cache - cache inaccessible.*\"):\n clear_download_cache(TESTURL)\n assert os.path.isfile(fnout)\n\n # now remove it so tests don't clutter up the temp dir this should get\n # called at exit, anyway, but we do it here just to make sure it's working\n # correctly\n _deltemps()\n assert not os.path.isfile(fnout)\n\n # now try with no cache\n fnnocache = download_file(TESTURL, cache=False)\n with open(fnnocache, \"rb\") as page:\n assert page.read().decode(\"utf-8\").find(\"Astropy\") > -1\n\n # no warnings should be raise in fileobj because cache is unnecessary\n\n\n@pytest.mark.parametrize(\n (\"filename\"),\n [\n \"unicode.txt\",\n \"unicode.txt.gz\",\n pytest.param(\n \"unicode.txt.bz2\",\n marks=pytest.mark.xfail(not HAS_BZ2, reason=\"no bz2 support\"),\n ),\n pytest.param(\n \"unicode.txt.xz\",\n marks=pytest.mark.xfail(not HAS_LZMA, reason=\"no lzma support\"),\n ),\n ],\n)\ndef test_read_unicode(filename):\n\n contents = get_pkg_data_contents(os.path.join(\"data\", filename), encoding=\"utf-8\")\n assert isinstance(contents, str)\n contents = contents.splitlines()[1]\n assert contents == \"האסטרונומי פייתון\"\n\n contents = get_pkg_data_contents(os.path.join(\"data\", filename), encoding=\"binary\")\n assert isinstance(contents, bytes)\n x = contents.splitlines()[1]\n assert x == (\n b\"\\xff\\xd7\\x94\\xd7\\x90\\xd7\\xa1\\xd7\\x98\\xd7\\xa8\\xd7\\x95\\xd7\\xa0\"\n b\"\\xd7\\x95\\xd7\\x9e\\xd7\\x99 \\xd7\\xa4\\xd7\\x99\\xd7\\x99\\xd7\\xaa\\xd7\\x95\\xd7\\x9f\"[1:]\n )\n\n\ndef test_compressed_stream():\n\n gzipped_data = (\n b\"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ\"\n b\"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA==\"\n )\n gzipped_data = base64.b64decode(gzipped_data)\n assert isinstance(gzipped_data, bytes)\n\n class FakeStream:\n \"\"\"\n A fake stream 
that has `read`, but no `seek`.\n \"\"\"\n\n def __init__(self, data):\n self.data = data\n\n def read(self, nbytes=None):\n if nbytes is None:\n result = self.data\n self.data = b\"\"\n else:\n result = self.data[:nbytes]\n self.data = self.data[nbytes:]\n return result\n\n stream = FakeStream(gzipped_data)\n with get_readable_fileobj(stream, encoding=\"binary\") as f:\n f.readline()\n assert f.read().rstrip() == b\"CONTENT\"\n\n\n@pytest.mark.remote_data(source=\"astropy\")\ndef test_invalid_location_download_raises_urlerror():\n \"\"\"\n checks that download_file gives a URLError and not an AttributeError,\n as its code pathway involves some fiddling with the exception.\n \"\"\"\n\n with pytest.raises(urllib.error.URLError):\n download_file(\"http://www.astropy.org/nonexistentfile\")\n\n\ndef test_invalid_location_download_noconnect():\n \"\"\"\n checks that download_file gives an OSError if the socket is blocked\n \"\"\"\n\n # This should invoke socket's monkeypatched failure\n with pytest.raises(OSError):\n download_file(\"http://astropy.org/nonexistentfile\")\n\n\n@pytest.mark.remote_data(source=\"astropy\")\ndef test_is_url_in_cache_remote():\n\n assert not is_url_in_cache(\"http://astropy.org/nonexistentfile\")\n\n download_file(TESTURL, cache=True, show_progress=False)\n assert is_url_in_cache(TESTURL)\n\n\ndef test_is_url_in_cache_local(temp_cache, valid_urls, invalid_urls):\n\n testurl, contents = next(valid_urls)\n nonexistent = next(invalid_urls)\n\n assert not is_url_in_cache(testurl)\n assert not is_url_in_cache(nonexistent)\n\n download_file(testurl, cache=True, show_progress=False)\n assert is_url_in_cache(testurl)\n assert not is_url_in_cache(nonexistent)\n\n\n# If non-deterministic failure happens see\n# https://github.com/astropy/astropy/issues/9765\ndef test_check_download_cache(tmpdir, temp_cache, valid_urls, invalid_urls):\n testurl, testurl_contents = next(valid_urls)\n testurl2, testurl2_contents = next(valid_urls)\n\n zip_file_name = 
os.path.join(tmpdir, \"the.zip\")\n clear_download_cache()\n assert not check_download_cache()\n\n download_file(testurl, cache=True)\n check_download_cache()\n download_file(testurl2, cache=True)\n check_download_cache()\n\n export_download_cache(zip_file_name, [testurl, testurl2])\n check_download_cache()\n\n clear_download_cache(testurl2)\n check_download_cache()\n\n import_download_cache(zip_file_name, [testurl])\n check_download_cache()\n\n\ndef test_export_import_roundtrip_one(tmpdir, temp_cache, valid_urls):\n testurl, contents = next(valid_urls)\n f = download_file(testurl, cache=True, show_progress=False)\n assert get_file_contents(f) == contents\n\n initial_urls_in_cache = set(get_cached_urls())\n zip_file_name = os.path.join(tmpdir, \"the.zip\")\n\n export_download_cache(zip_file_name, [testurl])\n clear_download_cache(testurl)\n import_download_cache(zip_file_name)\n assert is_url_in_cache(testurl)\n assert set(get_cached_urls()) == initial_urls_in_cache\n assert (\n get_file_contents(download_file(testurl, cache=True, show_progress=False))\n == contents\n )\n\n\ndef test_export_url_not_present(temp_cache, valid_urls):\n testurl, contents = next(valid_urls)\n with NamedTemporaryFile(\"wb\") as zip_file:\n assert not is_url_in_cache(testurl)\n with pytest.raises(KeyError):\n export_download_cache(zip_file, [testurl])\n\n\ndef test_import_one(tmpdir, temp_cache, valid_urls):\n testurl, testurl_contents = next(valid_urls)\n testurl2, testurl2_contents = next(valid_urls)\n zip_file_name = os.path.join(tmpdir, \"the.zip\")\n\n download_file(testurl, cache=True)\n download_file(testurl2, cache=True)\n assert is_url_in_cache(testurl2)\n export_download_cache(zip_file_name, [testurl, testurl2])\n clear_download_cache(testurl)\n clear_download_cache(testurl2)\n import_download_cache(zip_file_name, [testurl])\n assert is_url_in_cache(testurl)\n assert not is_url_in_cache(testurl2)\n\n\ndef test_export_import_roundtrip(tmpdir, temp_cache, valid_urls):\n 
zip_file_name = os.path.join(tmpdir, \"the.zip\")\n for u, c in islice(valid_urls, FEW):\n download_file(u, cache=True)\n\n initial_urls_in_cache = set(get_cached_urls())\n\n export_download_cache(zip_file_name)\n clear_download_cache()\n import_download_cache(zip_file_name)\n\n assert set(get_cached_urls()) == initial_urls_in_cache\n\n\ndef test_export_import_roundtrip_stream(temp_cache, valid_urls):\n for u, c in islice(valid_urls, FEW):\n download_file(u, cache=True)\n initial_urls_in_cache = set(get_cached_urls())\n\n with io.BytesIO() as f:\n export_download_cache(f)\n b = f.getvalue()\n clear_download_cache()\n with io.BytesIO(b) as f:\n import_download_cache(f)\n\n assert set(get_cached_urls()) == initial_urls_in_cache\n\n\ndef test_export_overwrite_flag_works(temp_cache, valid_urls, tmpdir):\n fn = tmpdir / \"f.zip\"\n c = b\"Some contents\\nto check later\"\n with open(fn, \"wb\") as f:\n f.write(c)\n for u, _ in islice(valid_urls, FEW):\n download_file(u, cache=True)\n\n with pytest.raises(FileExistsError):\n export_download_cache(fn)\n assert get_file_contents(fn, encoding='binary') == c\n\n export_download_cache(fn, overwrite=True)\n assert get_file_contents(fn, encoding='binary') != c\n\n\ndef test_export_import_roundtrip_different_location(tmpdir, valid_urls):\n original_cache = tmpdir / \"original\"\n os.mkdir(original_cache)\n zip_file_name = tmpdir / \"the.zip\"\n\n urls = list(islice(valid_urls, FEW))\n initial_urls_in_cache = {u for (u, c) in urls}\n with paths.set_temp_cache(original_cache):\n for u, c in urls:\n download_file(u, cache=True)\n assert set(get_cached_urls()) == initial_urls_in_cache\n export_download_cache(zip_file_name)\n\n new_cache = tmpdir / \"new\"\n os.mkdir(new_cache)\n with paths.set_temp_cache(new_cache):\n import_download_cache(zip_file_name)\n check_download_cache()\n assert set(get_cached_urls()) == initial_urls_in_cache\n for (u, c) in urls:\n assert get_file_contents(download_file(u, cache=True)) == c\n\n\ndef 
test_cache_size_is_zero_when_empty(temp_cache):\n assert not get_cached_urls()\n assert cache_total_size() == 0\n\n\ndef test_cache_size_changes_correctly_when_files_are_added_and_removed(\n temp_cache, valid_urls\n):\n u, c = next(valid_urls)\n clear_download_cache(u)\n s_i = cache_total_size()\n download_file(u, cache=True)\n assert cache_total_size() == s_i + len(c) + len(u.encode(\"utf-8\"))\n clear_download_cache(u)\n assert cache_total_size() == s_i\n\n\ndef test_cache_contents_agrees_with_get_urls(temp_cache, valid_urls):\n r = []\n for a, a_c in islice(valid_urls, FEW):\n a_f = download_file(a, cache=True)\n r.append((a, a_c, a_f))\n assert set(cache_contents().keys()) == set(get_cached_urls())\n for (u, c, h) in r:\n assert cache_contents()[u] == h\n\n\n@pytest.mark.parametrize('desired_size',\n [1_000_000_000_000_000_000, 1 * _u.Ebyte])\ndef test_free_space_checker_huge(tmpdir, desired_size):\n with pytest.raises(OSError):\n check_free_space_in_dir(str(tmpdir), desired_size)\n\n\ndef test_get_free_space_file_directory(tmpdir):\n fn = tmpdir / \"file\"\n with open(fn, \"w\"):\n pass\n with pytest.raises(OSError):\n get_free_space_in_dir(str(fn))\n\n free_space = get_free_space_in_dir(str(tmpdir))\n assert free_space > 0 and not hasattr(free_space, 'unit')\n\n # TODO: If unit=True starts to auto-guess prefix, this needs updating.\n free_space = get_free_space_in_dir(str(tmpdir), unit=True)\n assert free_space > 0 and free_space.unit == _u.byte\n\n free_space = get_free_space_in_dir(str(tmpdir), unit=_u.Mbit)\n assert free_space > 0 and free_space.unit == _u.Mbit\n\n\ndef test_download_file_bogus_settings(invalid_urls, temp_cache):\n u = next(invalid_urls)\n with pytest.raises(KeyError):\n download_file(u, sources=[])\n\n\ndef test_download_file_local_directory(tmpdir):\n \"\"\"Make sure we get a URLError rather than OSError even if it's a\n local directory.\"\"\"\n with pytest.raises(urllib.request.URLError):\n download_file(url_to(tmpdir))\n\n\ndef 
test_download_file_schedules_deletion(valid_urls):\n u, c = next(valid_urls)\n f = download_file(u)\n assert f in _tempfilestodel\n # how to test deletion actually occurs?\n\n\ndef test_clear_download_cache_refuses_to_delete_outside_the_cache(tmpdir):\n fn = os.path.abspath(os.path.join(tmpdir, \"file\"))\n with open(fn, \"w\") as f:\n f.write(\"content\")\n assert os.path.exists(fn)\n with pytest.raises(RuntimeError):\n clear_download_cache(fn)\n assert os.path.exists(fn)\n\n\ndef test_check_download_cache_finds_bogus_entries(temp_cache, valid_urls):\n u, c = next(valid_urls)\n download_file(u, cache=True)\n dldir = _get_download_cache_loc()\n bf = os.path.abspath(os.path.join(dldir, \"bogus\"))\n with open(bf, \"wt\") as f:\n f.write(\"bogus file that exists\")\n with pytest.raises(CacheDamaged) as e:\n check_download_cache()\n assert bf in e.value.bad_files\n clear_download_cache()\n\n\ndef test_check_download_cache_finds_bogus_subentries(temp_cache, valid_urls):\n u, c = next(valid_urls)\n f = download_file(u, cache=True)\n bf = os.path.abspath(os.path.join(os.path.dirname(f), \"bogus\"))\n with open(bf, \"wt\") as f:\n f.write(\"bogus file that exists\")\n with pytest.raises(CacheDamaged) as e:\n check_download_cache()\n assert bf in e.value.bad_files\n clear_download_cache()\n\n\ndef test_check_download_cache_cleanup(temp_cache, valid_urls):\n u, c = next(valid_urls)\n fn = download_file(u, cache=True)\n dldir = _get_download_cache_loc()\n\n bf1 = os.path.abspath(os.path.join(dldir, \"bogus1\"))\n with open(bf1, \"wt\") as f:\n f.write(\"bogus file that exists\")\n\n bf2 = os.path.abspath(os.path.join(os.path.dirname(fn), \"bogus2\"))\n with open(bf2, \"wt\") as f:\n f.write(\"other bogus file that exists\")\n\n bf3 = os.path.abspath(os.path.join(dldir, \"contents\"))\n with open(bf3, \"wt\") as f:\n f.write(\"awkwardly-named bogus file that exists\")\n\n u2, c2 = next(valid_urls)\n f2 = download_file(u, cache=True)\n os.unlink(f2)\n bf4 = 
os.path.dirname(f2)\n\n with pytest.raises(CacheDamaged) as e:\n check_download_cache()\n assert set(e.value.bad_files) == {bf1, bf2, bf3, bf4}\n for bf in e.value.bad_files:\n clear_download_cache(bf)\n # download cache will be checked on exit\n\n\ndef test_download_cache_update_doesnt_damage_cache(temp_cache, valid_urls):\n u, _ = next(valid_urls)\n download_file(u, cache=True)\n download_file(u, cache=\"update\")\n\n\ndef test_cache_dir_is_actually_a_file(tmpdir, valid_urls):\n \"\"\"Ensure that bogus cache settings are handled sensibly.\n\n Because the user can specify the cache location in a config file, and\n because they might try to deduce the location by looking around at what's\n in their directory tree, and because the cache directory is actual several\n tree levels down from the directory set in the config file, it's important\n to check what happens if each of the steps in the path is wrong somehow.\n \"\"\"\n def check_quietly_ignores_bogus_cache():\n \"\"\"We want a broken cache to produce a warning but then astropy should\n act like there isn't a cache.\n \"\"\"\n with pytest.warns(CacheMissingWarning):\n assert not get_cached_urls()\n with pytest.warns(CacheMissingWarning):\n assert not is_url_in_cache(\"http://www.example.com/\")\n with pytest.warns(CacheMissingWarning):\n assert not cache_contents()\n with pytest.warns(CacheMissingWarning):\n u, c = next(valid_urls)\n r = download_file(u, cache=True)\n assert get_file_contents(r) == c\n # check the filename r appears in a warning message?\n # check r is added to the delete_at_exit list?\n # in fact should there be testing of the delete_at_exit mechanism,\n # as far as that is possible?\n with pytest.warns(CacheMissingWarning):\n assert not is_url_in_cache(u)\n with pytest.warns(CacheMissingWarning):\n with pytest.raises(OSError):\n check_download_cache()\n\n dldir = _get_download_cache_loc()\n # set_temp_cache acts weird if it is pointed at a file (see below)\n # but we want to see what happens 
when the cache is pointed\n # at a file instead of a directory, so make a directory we can\n # replace later.\n fn = str(tmpdir / \"file\")\n ct = \"contents\\n\"\n os.mkdir(fn)\n with paths.set_temp_cache(fn):\n shutil.rmtree(fn)\n with open(fn, \"w\") as f:\n f.write(ct)\n with pytest.raises(OSError):\n paths.get_cache_dir()\n check_quietly_ignores_bogus_cache()\n assert dldir == _get_download_cache_loc()\n assert get_file_contents(fn) == ct, \"File should not be harmed.\"\n\n # See what happens when set_temp_cache is pointed at a file\n with pytest.raises(OSError):\n with paths.set_temp_cache(fn):\n pass\n assert dldir == _get_download_cache_loc()\n assert get_file_contents(str(fn)) == ct\n\n # Now the cache directory is normal but the subdirectory it wants\n # to make is a file\n cd = str(tmpdir / \"astropy\")\n with open(cd, \"w\") as f:\n f.write(ct)\n with paths.set_temp_cache(tmpdir):\n check_quietly_ignores_bogus_cache()\n assert dldir == _get_download_cache_loc()\n assert get_file_contents(cd) == ct\n os.remove(cd)\n\n # Ditto one level deeper\n os.makedirs(cd)\n cd = str(tmpdir / \"astropy\" / \"download\")\n with open(cd, \"w\") as f:\n f.write(ct)\n with paths.set_temp_cache(tmpdir):\n check_quietly_ignores_bogus_cache()\n assert dldir == _get_download_cache_loc()\n assert get_file_contents(cd) == ct\n os.remove(cd)\n\n # Ditto another level deeper\n os.makedirs(cd)\n cd = str(tmpdir / \"astropy\" / \"download\" / \"url\")\n with open(cd, \"w\") as f:\n f.write(ct)\n with paths.set_temp_cache(tmpdir):\n check_quietly_ignores_bogus_cache()\n assert dldir == _get_download_cache_loc()\n assert get_file_contents(cd) == ct\n os.remove(cd)\n\n\ndef test_get_fileobj_str(a_file):\n fn, c = a_file\n with get_readable_fileobj(str(fn)) as rf:\n assert rf.read() == c\n\n\ndef test_get_fileobj_localpath(a_file):\n fn, c = a_file\n with get_readable_fileobj(py.path.local(fn)) as rf:\n assert rf.read() == c\n\n\ndef test_get_fileobj_pathlib(a_file):\n fn, c = 
a_file\n with get_readable_fileobj(pathlib.Path(fn)) as rf:\n assert rf.read() == c\n\n\ndef test_get_fileobj_binary(a_binary_file):\n fn, c = a_binary_file\n with get_readable_fileobj(fn, encoding=\"binary\") as rf:\n assert rf.read() == c\n\n\ndef test_get_fileobj_already_open_text(a_file):\n fn, c = a_file\n with open(fn) as f:\n with get_readable_fileobj(f) as rf:\n with pytest.raises(TypeError):\n rf.read()\n\n\ndef test_get_fileobj_already_open_binary(a_file):\n fn, c = a_file\n with open(fn, \"rb\") as f:\n with get_readable_fileobj(f) as rf:\n assert rf.read() == c\n\n\ndef test_get_fileobj_binary_already_open_binary(a_binary_file):\n fn, c = a_binary_file\n with open(fn, \"rb\") as f:\n with get_readable_fileobj(f, encoding=\"binary\") as rf:\n assert rf.read() == c\n\n\ndef test_cache_contents_not_writable(temp_cache, valid_urls):\n c = cache_contents()\n with pytest.raises(TypeError):\n c[\"foo\"] = 7\n u, _ = next(valid_urls)\n download_file(u, cache=True)\n c = cache_contents()\n assert u in c\n with pytest.raises(TypeError):\n c[\"foo\"] = 7\n\n\ndef test_cache_relocatable(tmpdir, valid_urls):\n u, c = next(valid_urls)\n d1 = tmpdir / \"1\"\n d2 = tmpdir / \"2\"\n os.mkdir(d1)\n with paths.set_temp_cache(d1):\n p1 = download_file(u, cache=True)\n assert is_url_in_cache(u)\n assert get_file_contents(p1) == c\n shutil.copytree(d1, d2)\n clear_download_cache()\n with paths.set_temp_cache(d2):\n assert is_url_in_cache(u)\n p2 = download_file(u, cache=True)\n assert p1 != p2\n assert os.path.exists(p2)\n clear_download_cache(p2)\n check_download_cache()\n\n\ndef test_get_readable_fileobj_cleans_up_temporary_files(tmpdir, monkeypatch):\n \"\"\"checks that get_readable_fileobj leaves no temporary files behind\"\"\"\n # Create a 'file://' URL pointing to a path on the local filesystem\n url = url_to(TESTLOCAL)\n\n # Save temporary files to a known location\n monkeypatch.setattr(tempfile, \"tempdir\", str(tmpdir))\n\n # Call get_readable_fileobj() as a context 
manager\n with get_readable_fileobj(url) as f:\n f.read()\n\n # Get listing of files in temporary directory\n tempdir_listing = tmpdir.listdir()\n\n # Assert that the temporary file was empty after get_readable_fileobj()\n # context manager finished running\n assert len(tempdir_listing) == 0\n\n\ndef test_path_objects_get_readable_fileobj():\n fpath = pathlib.Path(TESTLOCAL)\n with get_readable_fileobj(fpath) as f:\n assert f.read().rstrip() == (\n \"This file is used in the test_local_data_* testing functions\\nCONTENT\"\n )\n\n\ndef test_nested_get_readable_fileobj():\n \"\"\"Ensure fileobj state is as expected when get_readable_fileobj()\n is called inside another get_readable_fileobj().\n \"\"\"\n with get_readable_fileobj(TESTLOCAL, encoding=\"binary\") as fileobj:\n with get_readable_fileobj(fileobj, encoding=\"UTF-8\") as fileobj2:\n fileobj2.seek(1)\n fileobj.seek(1)\n\n # Theoretically, fileobj2 should be closed already here but it is not.\n # See https://github.com/astropy/astropy/pull/8675.\n # UNCOMMENT THIS WHEN PYTHON FINALLY LETS IT HAPPEN.\n # assert fileobj2.closed\n\n assert fileobj.closed and fileobj2.closed\n\n\ndef test_download_file_wrong_size(monkeypatch):\n\n @contextlib.contextmanager\n def mockurl(remote_url, timeout=None):\n yield MockURL()\n\n def mockurl_builder(*args, tlscontext=None, **kwargs):\n mock_opener = type('MockOpener', (object,), {})()\n mock_opener.open = mockurl\n return mock_opener\n\n class MockURL:\n def __init__(self):\n self.reader = io.BytesIO(b\"a\" * real_length)\n\n def info(self):\n return {\"Content-Length\": str(report_length)}\n\n def read(self, length=None):\n return self.reader.read(length)\n\n monkeypatch.setattr(astropy.utils.data, \"_build_urlopener\", mockurl_builder)\n\n with pytest.raises(urllib.error.ContentTooShortError):\n report_length = 1024\n real_length = 1023\n download_file(TESTURL, cache=False)\n\n with pytest.raises(urllib.error.URLError):\n report_length = 1023\n real_length = 1024\n 
download_file(TESTURL, cache=False)\n\n report_length = 1023\n real_length = 1023\n fn = download_file(TESTURL, cache=False)\n with open(fn, \"rb\") as f:\n assert f.read() == b\"a\" * real_length\n\n report_length = None\n real_length = 1023\n fn = download_file(TESTURL, cache=False)\n with open(fn, \"rb\") as f:\n assert f.read() == b\"a\" * real_length\n\n\ndef test_can_make_directories_readonly(tmpdir):\n try:\n with readonly_dir(tmpdir):\n assert is_dir_readonly(tmpdir)\n except AssertionError:\n if hasattr(os, \"geteuid\") and os.geteuid() == 0:\n pytest.skip(\n \"We are root, we can't make a directory un-writable with chmod.\"\n )\n elif platform.system() == \"Windows\":\n pytest.skip(\n \"It seems we can't make a driectory un-writable under Windows \"\n \"with chmod, in spite of the documentation.\"\n )\n else:\n raise\n\n\ndef test_can_make_files_readonly(tmpdir):\n fn = tmpdir / \"test\"\n c = \"contents\\n\"\n with open(fn, \"w\") as f:\n f.write(c)\n with readonly_dir(tmpdir):\n try:\n with open(fn, \"w+\") as f:\n f.write(\"more contents\\n\")\n except PermissionError:\n pass\n else:\n if hasattr(os, \"geteuid\") and os.geteuid() == 0:\n pytest.skip(\"We are root, we can't make a file un-writable with chmod.\")\n assert get_file_contents(fn) == c\n\n\ndef test_read_cache_readonly(readonly_cache):\n assert cache_contents() == readonly_cache\n\n\ndef test_download_file_cache_readonly(readonly_cache):\n for u in readonly_cache:\n f = download_file(u, cache=True)\n assert f == readonly_cache[u]\n\n\ndef test_import_file_cache_readonly(readonly_cache, tmpdir):\n filename = os.path.join(tmpdir, \"test-file\")\n content = \"Some text or other\"\n url = \"http://example.com/\"\n with open(filename, \"wt\") as f:\n f.write(content)\n\n with pytest.raises(OSError):\n import_file_to_cache(url, filename, remove_original=True)\n assert not is_url_in_cache(url)\n\n\ndef test_download_file_cache_readonly_cache_miss(readonly_cache, valid_urls):\n u, c = 
next(valid_urls)\n with pytest.warns(CacheMissingWarning):\n f = download_file(u, cache=True)\n assert get_file_contents(f) == c\n assert not is_url_in_cache(u)\n\n\ndef test_download_file_cache_readonly_update(readonly_cache):\n for u in readonly_cache:\n with pytest.warns(CacheMissingWarning):\n f = download_file(u, cache=\"update\")\n assert f != readonly_cache[u]\n assert compute_hash(f) == compute_hash(readonly_cache[u])\n\n\ndef test_check_download_cache_works_if_readonly(readonly_cache):\n check_download_cache()\n\n\n# On Windows I can't make directories readonly. On CircleCI I can't make\n# anything readonly because the test suite runs as root. So on those platforms\n# none of the \"real\" tests above can be run. I can use monkeypatch to trigger\n# the readonly code paths, see the \"fake\" versions of the tests below, but I\n# don't totally trust those to completely explore what happens either, so we\n# have both. I couldn't see an easy way to parameterize over fixtures and share\n# tests.\n\n\ndef test_read_cache_fake_readonly(fake_readonly_cache):\n assert cache_contents() == fake_readonly_cache\n\n\ndef test_download_file_cache_fake_readonly(fake_readonly_cache):\n for u in fake_readonly_cache:\n f = download_file(u, cache=True)\n assert f == fake_readonly_cache[u]\n\n\ndef test_mkdtemp_cache_fake_readonly(fake_readonly_cache):\n with pytest.raises(OSError):\n tempfile.mkdtemp()\n\n\ndef test_TD_cache_fake_readonly(fake_readonly_cache):\n with pytest.raises(OSError):\n with TemporaryDirectory():\n pass\n\n\ndef test_import_file_cache_fake_readonly(fake_readonly_cache, tmpdir):\n filename = os.path.join(tmpdir, \"test-file\")\n content = \"Some text or other\"\n url = \"http://example.com/\"\n with open(filename, \"wt\") as f:\n f.write(content)\n\n with pytest.raises(OSError):\n import_file_to_cache(url, filename, remove_original=True)\n assert not is_url_in_cache(url)\n\n\ndef test_download_file_cache_fake_readonly_cache_miss(fake_readonly_cache, 
valid_urls):\n u, c = next(valid_urls)\n with pytest.warns(CacheMissingWarning):\n f = download_file(u, cache=True)\n assert not is_url_in_cache(u)\n assert get_file_contents(f) == c\n\n\ndef test_download_file_cache_fake_readonly_update(fake_readonly_cache):\n for u in fake_readonly_cache:\n with pytest.warns(CacheMissingWarning):\n f = download_file(u, cache=\"update\")\n assert f != fake_readonly_cache[u]\n assert compute_hash(f) == compute_hash(fake_readonly_cache[u])\n\n\ndef test_check_download_cache_works_if_fake_readonly(fake_readonly_cache):\n check_download_cache()\n\n\ndef test_pkgname_isolation(temp_cache, valid_urls):\n a = \"bogus_cache_name\"\n\n assert not get_cached_urls()\n assert not get_cached_urls(pkgname=a)\n\n for u, _ in islice(valid_urls, FEW):\n download_file(u, cache=True, pkgname=a)\n assert not get_cached_urls()\n assert len(get_cached_urls(pkgname=a)) == FEW\n assert cache_total_size() < cache_total_size(pkgname=a)\n\n for u, _ in islice(valid_urls, FEW+1):\n download_file(u, cache=True)\n assert len(get_cached_urls()) == FEW+1\n assert len(get_cached_urls(pkgname=a)) == FEW\n assert cache_total_size() > cache_total_size(pkgname=a)\n\n assert set(get_cached_urls()) == set(cache_contents().keys())\n assert set(get_cached_urls(pkgname=a)) == set(cache_contents(pkgname=a).keys())\n for i in get_cached_urls():\n assert is_url_in_cache(i)\n assert not is_url_in_cache(i, pkgname=a)\n for i in get_cached_urls(pkgname=a):\n assert not is_url_in_cache(i)\n assert is_url_in_cache(i, pkgname=a)\n\n # FIXME: need to break a cache to test whether we check the right one\n check_download_cache()\n check_download_cache(pkgname=a)\n\n # FIXME: check that cache='update' works\n\n u = get_cached_urls()[0]\n with pytest.raises(KeyError):\n download_file(u, cache=True, sources=[], pkgname=a)\n clear_download_cache(u, pkgname=a)\n assert len(get_cached_urls()) == FEW+1, \"wrong pkgname should do nothing\"\n assert len(get_cached_urls(pkgname=a)) == FEW, 
\"wrong pkgname should do nothing\"\n\n f = download_file(u, sources=[], cache=True)\n with pytest.raises(RuntimeError):\n clear_download_cache(f, pkgname=a)\n\n ua = get_cached_urls(pkgname=a)[0]\n with pytest.raises(KeyError):\n download_file(ua, cache=True, sources=[])\n\n fa = download_file(ua, sources=[], cache=True, pkgname=a)\n with pytest.raises(RuntimeError):\n clear_download_cache(fa)\n\n clear_download_cache(ua, pkgname=a)\n assert len(get_cached_urls()) == FEW+1\n assert len(get_cached_urls(pkgname=a)) == FEW-1\n\n clear_download_cache(u)\n assert len(get_cached_urls()) == FEW\n assert len(get_cached_urls(pkgname=a)) == FEW-1\n\n clear_download_cache(pkgname=a)\n assert len(get_cached_urls()) == FEW\n assert not get_cached_urls(pkgname=a)\n\n clear_download_cache()\n assert not get_cached_urls()\n assert not get_cached_urls(pkgname=a)\n\n\ndef test_transport_cache_via_zip(temp_cache, valid_urls):\n a = \"bogus_cache_name\"\n\n assert not get_cached_urls()\n assert not get_cached_urls(pkgname=a)\n\n for u, _ in islice(valid_urls, FEW):\n download_file(u, cache=True)\n\n with io.BytesIO() as f:\n export_download_cache(f)\n b = f.getvalue()\n with io.BytesIO(b) as f:\n import_download_cache(f, pkgname=a)\n\n check_download_cache()\n check_download_cache(pkgname=a)\n\n assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))\n cca = cache_contents(pkgname=a)\n for k, v in cache_contents().items():\n assert v != cca[k]\n assert get_file_contents(v) == get_file_contents(cca[k])\n clear_download_cache()\n\n with io.BytesIO() as f:\n export_download_cache(f, pkgname=a)\n b = f.getvalue()\n with io.BytesIO(b) as f:\n import_download_cache(f)\n\n assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))\n\n\ndef test_download_parallel_respects_pkgname(temp_cache, valid_urls):\n a = \"bogus_cache_name\"\n\n assert not get_cached_urls()\n assert not get_cached_urls(pkgname=a)\n\n download_files_in_parallel([u for (u, c) in islice(valid_urls, FEW)],\n 
pkgname=a)\n assert not get_cached_urls()\n assert len(get_cached_urls(pkgname=a)) == FEW\n\n\n@pytest.mark.skipif(not CAN_RENAME_DIRECTORY_IN_USE,\n reason=\"This platform is unable to rename directories that are in use.\")\ndef test_removal_of_open_files(temp_cache, valid_urls):\n u, c = next(valid_urls)\n with open(download_file(u, cache=True)):\n clear_download_cache(u)\n assert not is_url_in_cache(u)\n check_download_cache()\n\n\n@pytest.mark.skipif(not CAN_RENAME_DIRECTORY_IN_USE,\n reason=\"This platform is unable to rename directories that are in use.\")\ndef test_update_of_open_files(temp_cache, valid_urls):\n u, c = next(valid_urls)\n with open(download_file(u, cache=True)):\n u2, c2 = next(valid_urls)\n f = download_file(u, cache='update', sources=[u2])\n check_download_cache()\n assert is_url_in_cache(u)\n assert get_file_contents(f) == c2\n assert is_url_in_cache(u)\n\n\ndef test_removal_of_open_files_windows(temp_cache, valid_urls, monkeypatch):\n def no_rmtree(*args, **kwargs):\n warnings.warn(CacheMissingWarning(\"in use\"))\n raise PermissionError\n\n if CAN_RENAME_DIRECTORY_IN_USE:\n # This platform is able to remove files while in use.\n monkeypatch.setattr(astropy.utils.data, \"_rmtree\", no_rmtree)\n\n u, c = next(valid_urls)\n with open(download_file(u, cache=True)):\n with pytest.warns(CacheMissingWarning, match=r\".*in use.*\"):\n clear_download_cache(u)\n\n\ndef test_update_of_open_files_windows(temp_cache, valid_urls, monkeypatch):\n def no_rmtree(*args, **kwargs):\n warnings.warn(CacheMissingWarning(\"in use\"))\n raise PermissionError\n\n if CAN_RENAME_DIRECTORY_IN_USE:\n # This platform is able to remove files while in use.\n monkeypatch.setattr(astropy.utils.data, \"_rmtree\", no_rmtree)\n\n u, c = next(valid_urls)\n with open(download_file(u, cache=True)):\n u2, c2 = next(valid_urls)\n with pytest.warns(CacheMissingWarning, match=r\".*in use.*\"):\n f = download_file(u, cache='update', sources=[u2])\n check_download_cache()\n assert 
is_url_in_cache(u)\n assert get_file_contents(f) == c2\n assert get_file_contents(download_file(u, cache=True, sources=[])) == c\n\n\ndef test_no_allow_internet(temp_cache, valid_urls):\n u, c = next(valid_urls)\n with conf.set_temp('allow_internet', False):\n with pytest.raises(urllib.error.URLError):\n download_file(u)\n assert not is_url_in_cache(u)\n with pytest.raises(urllib.error.URLError):\n # This will trigger the remote data error if it's allowed to touch the internet\n download_file(TESTURL)\n\n\ndef test_clear_download_cache_not_too_aggressive(temp_cache, valid_urls):\n u, c = next(valid_urls)\n download_file(u, cache=True)\n dldir = _get_download_cache_loc()\n\n bad_filename = os.path.join(dldir, \"contents\")\n assert is_url_in_cache(u)\n clear_download_cache(bad_filename)\n assert is_url_in_cache(u)\n\n\ndef test_clear_download_cache_variants(temp_cache, valid_urls):\n # deletion by contents filename\n u, c = next(valid_urls)\n f = download_file(u, cache=True)\n clear_download_cache(f)\n assert not is_url_in_cache(u)\n\n # deletion by url filename\n u, c = next(valid_urls)\n f = download_file(u, cache=True)\n clear_download_cache(os.path.join(os.path.dirname(f), 'url'))\n assert not is_url_in_cache(u)\n\n # deletion by hash directory name\n u, c = next(valid_urls)\n f = download_file(u, cache=True)\n clear_download_cache(os.path.dirname(f))\n assert not is_url_in_cache(u)\n\n # deletion by directory name with trailing slash\n u, c = next(valid_urls)\n f = download_file(u, cache=True)\n clear_download_cache(os.path.dirname(f)+'/')\n assert not is_url_in_cache(u)\n\n # deletion by hash of file contents\n u, c = next(valid_urls)\n f = download_file(u, cache=True)\n h = compute_hash(f)\n clear_download_cache(h)\n assert not is_url_in_cache(u)\n\n\n@pytest.mark.skipif(\"CI\", reason=\"Flaky on CI\")\n@pytest.mark.remote_data\ndef test_ftp_tls_auto(temp_cache):\n url = 
\"ftp://anonymous:mail%40astropy.org@gdc.cddis.eosdis.nasa.gov/pub/products/iers/finals2000A.all\" # noqa\n download_file(url)\n\n\n@pytest.mark.parametrize('base', [\"http://example.com\", \"https://example.com\"])\ndef test_url_trailing_slash(temp_cache, valid_urls, base):\n slash = base + \"/\"\n no_slash = base\n\n u, c = next(valid_urls)\n\n download_file(slash, cache=True, sources=[u])\n\n assert is_url_in_cache(no_slash)\n download_file(no_slash, cache=True, sources=[])\n clear_download_cache(no_slash)\n assert not is_url_in_cache(no_slash)\n assert not is_url_in_cache(slash)\n\n download_file(no_slash, cache=True, sources=[u])\n # see if implicit check_download_cache squawks\n\n\ndef test_empty_url(temp_cache, valid_urls):\n u, c = next(valid_urls)\n download_file('file://', cache=True, sources=[u])\n assert not is_url_in_cache('file:///')\n\n\n@pytest.mark.remote_data\ndef test_download_ftp_file_properly_handles_socket_error():\n faulty_url = \"ftp://anonymous:mail%40astropy.org@nonexisting/pub/products/iers/finals2000A.all\"\n with pytest.raises(urllib.error.URLError) as excinfo:\n download_file(faulty_url)\n errmsg = excinfo.exconly()\n found_msg = False\n possible_msgs = ['Name or service not known',\n 'nodename nor servname provided, or not known',\n 'getaddrinfo failed',\n 'Temporary failure in name resolution',\n 'No address associated with hostname']\n for cur_msg in possible_msgs:\n if cur_msg in errmsg:\n found_msg = True\n break\n assert found_msg, f'Got {errmsg}, expected one of these: {\",\".join(possible_msgs)}'\n\n\n@pytest.mark.parametrize(\n ('s', 'ans'),\n [('http://googlecom', True),\n ('https://google.com', True),\n ('ftp://google.com', True),\n ('sftp://google.com', True),\n ('ssh://google.com', True),\n ('file:///c:/path/to/the%20file.txt', True),\n ('google.com', False),\n ('C:\\\\\\\\path\\\\\\\\file.docx', False),\n ('data://file', False)])\ndef test_string_is_url_check(s, ans):\n assert is_url(s) is 
ans\n"}}},{"rowIdx":1370,"cells":{"hash":{"kind":"string","value":"29c4559962daaed2eda4b91ac3da247da506dc27b0c02bdea4086ccd84da6385"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\nimport io\n\nimport pytest\n\nfrom . import test_progress_bar_func\nfrom astropy.utils import console\nfrom astropy import units as u\n\n\nclass FakeTTY(io.StringIO):\n \"\"\"IOStream that fakes a TTY; provide an encoding to emulate an output\n stream with a specific encoding.\n \"\"\"\n\n def __new__(cls, encoding=None):\n # Return a new subclass of FakeTTY with the requested encoding\n if encoding is None:\n return super().__new__(cls)\n\n encoding = encoding\n cls = type(encoding.title() + cls.__name__, (cls,),\n {'encoding': encoding})\n\n return cls.__new__(cls)\n\n def __init__(self, encoding=None):\n super().__init__()\n\n def write(self, s):\n if isinstance(s, bytes):\n # Just allow this case to work\n s = s.decode('latin-1')\n elif self.encoding is not None:\n s.encode(self.encoding)\n\n return super().write(s)\n\n def isatty(self):\n return True\n\n\ndef test_fake_tty():\n # First test without a specified encoding; we should be able to write\n # arbitrary unicode strings\n f1 = FakeTTY()\n assert f1.isatty()\n f1.write('☃')\n assert f1.getvalue() == '☃'\n\n # Now test an ASCII-only TTY--it should raise a UnicodeEncodeError when\n # trying to write a string containing non-ASCII characters\n f2 = FakeTTY('ascii')\n assert f2.isatty()\n assert f2.__class__.__name__ == 'AsciiFakeTTY'\n assert pytest.raises(UnicodeEncodeError, f2.write, '☃')\n assert f2.getvalue() == ''\n\n\n@pytest.mark.skipif(\"sys.platform.startswith('win')\")\ndef test_color_text():\n assert console._color_text(\"foo\", \"green\") == '\\033[0;32mfoo\\033[0m'\n\n\ndef test_color_print():\n # This stuff is hard to test, at least smoke test it\n console.color_print(\"foo\", \"green\")\n\n console.color_print(\"foo\", \"green\", \"bar\", \"red\")\n\n\ndef 
test_color_print2():\n # Test that this automatically detects that io.StringIO is\n # not a tty\n stream = io.StringIO()\n console.color_print(\"foo\", \"green\", file=stream)\n assert stream.getvalue() == 'foo\\n'\n\n stream = io.StringIO()\n console.color_print(\"foo\", \"green\", \"bar\", \"red\", \"baz\", file=stream)\n assert stream.getvalue() == 'foobarbaz\\n'\n\n\n@pytest.mark.skipif(\"sys.platform.startswith('win')\")\ndef test_color_print3():\n # Test that this thinks the FakeTTY is a tty and applies colors.\n\n stream = FakeTTY()\n console.color_print(\"foo\", \"green\", file=stream)\n assert stream.getvalue() == '\\x1b[0;32mfoo\\x1b[0m\\n'\n\n stream = FakeTTY()\n console.color_print(\"foo\", \"green\", \"bar\", \"red\", \"baz\", file=stream)\n assert stream.getvalue() == '\\x1b[0;32mfoo\\x1b[0m\\x1b[0;31mbar\\x1b[0mbaz\\n'\n\n\ndef test_color_print_unicode():\n console.color_print(\"überbær\", \"red\")\n\n\ndef test_color_print_invalid_color():\n console.color_print(\"foo\", \"unknown\")\n\n\ndef test_spinner_non_unicode_console():\n \"\"\"Regression test for #1760\n\n Ensures that the spinner can fall go into fallback mode when using the\n unicode spinner on a terminal whose default encoding cannot encode the\n unicode characters.\n \"\"\"\n\n stream = FakeTTY('ascii')\n chars = console.Spinner._default_unicode_chars\n\n with console.Spinner(\"Reticulating splines\", file=stream,\n chars=chars) as s:\n next(s)\n\n\ndef test_progress_bar():\n # This stuff is hard to test, at least smoke test it\n with console.ProgressBar(50) as bar:\n for i in range(50):\n bar.update()\n\n\ndef test_progress_bar2():\n for x in console.ProgressBar(range(50)):\n pass\n\n\ndef test_progress_bar3():\n def do_nothing(*args, **kwargs):\n pass\n\n console.ProgressBar.map(do_nothing, range(50))\n\n\ndef test_zero_progress_bar():\n with console.ProgressBar(0) as bar:\n pass\n\n\ndef test_progress_bar_as_generator():\n sum = 0\n for x in console.ProgressBar(range(50)):\n sum += 
x\n assert sum == 1225\n\n sum = 0\n for x in console.ProgressBar(50):\n sum += x\n assert sum == 1225\n\n\ndef test_progress_bar_map():\n items = list(range(100))\n result = console.ProgressBar.map(test_progress_bar_func.func,\n items, step=10, multiprocess=True)\n assert items == result\n\n result1 = console.ProgressBar.map(test_progress_bar_func.func,\n items, step=10, multiprocess=2)\n\n assert items == result1\n\n\n@pytest.mark.parametrize((\"seconds\", \"string\"),\n [(864088, \" 1w 3d\"),\n (187213, \" 2d 4h\"),\n (3905, \" 1h 5m\"),\n (64, \" 1m 4s\"),\n (15, \" 15s\"),\n (2, \" 2s\")]\n)\ndef test_human_time(seconds, string):\n human_time = console.human_time(seconds)\n assert human_time == string\n\n\n@pytest.mark.parametrize((\"size\", \"string\"),\n [(8640882, \"8.6M\"),\n (187213, \"187k\"),\n (3905, \"3.9k\"),\n (64, \" 64 \"),\n (2, \" 2 \"),\n (10*u.GB, \" 10G\")]\n)\ndef test_human_file_size(size, string):\n human_time = console.human_file_size(size)\n assert human_time == string\n\n\n@pytest.mark.parametrize(\"size\", (50*u.km, 100*u.g))\ndef test_bad_human_file_size(size):\n assert pytest.raises(u.UnitConversionError, console.human_file_size, size)\n"}}},{"rowIdx":1371,"cells":{"hash":{"kind":"string","value":"2874cdd5f139057a176ee2fcd0b0f886f0d2280ea6406c94f349bd6c348ae9b6"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nBuilt-in mask mixin class.\n\nThe design uses `Masked` as a factory class which automatically\ngenerates new subclasses for any data class that is itself a\nsubclass of a predefined masked class, with `MaskedNDArray`\nproviding such a predefined class for `~numpy.ndarray`.\n\nGenerally, any new predefined class should override the\n``from_unmasked(data, mask, copy=False)`` class method that\ncreates an instance from unmasked data and a mask, as well as\nthe ``unmasked`` property that returns just the data.\nThe `Masked` class itself provides a base ``mask`` 
property,\nwhich can also be overridden if needed.\n\n\"\"\"\nimport builtins\n\nimport numpy as np\n\nfrom astropy.utils.compat import NUMPY_LT_1_22\nfrom astropy.utils.shapes import NDArrayShapeMethods\nfrom astropy.utils.data_info import ParentDtypeInfo\n\nfrom .function_helpers import (MASKED_SAFE_FUNCTIONS,\n APPLY_TO_BOTH_FUNCTIONS,\n DISPATCHED_FUNCTIONS,\n UNSUPPORTED_FUNCTIONS)\n\n\n__all__ = ['Masked', 'MaskedNDArray']\n\n\nget__doc__ = \"\"\"Masked version of {0.__name__}.\n\nExcept for the ability to pass in a ``mask``, parameters are\nas for `{0.__module__}.{0.__name__}`.\n\"\"\".format\n\n\nclass Masked(NDArrayShapeMethods):\n \"\"\"A scalar value or array of values with associated mask.\n\n The resulting instance will take its exact type from whatever the\n contents are, with the type generated on the fly as needed.\n\n Parameters\n ----------\n data : array-like\n The data for which a mask is to be added. The result will be a\n a subclass of the type of ``data``.\n mask : array-like of bool, optional\n The initial mask to assign. If not given, taken from the data.\n copy : bool\n Whether the data and mask should be copied. 
Default: `False`.\n\n \"\"\"\n\n _base_classes = {}\n \"\"\"Explicitly defined masked classes keyed by their unmasked counterparts.\n\n For subclasses of these unmasked classes, masked counterparts can be generated.\n \"\"\"\n\n _masked_classes = {}\n \"\"\"Masked classes keyed by their unmasked data counterparts.\"\"\"\n\n def __new__(cls, *args, **kwargs):\n if cls is Masked:\n # Initializing with Masked itself means we're in \"factory mode\".\n if not kwargs and len(args) == 1 and isinstance(args[0], type):\n # Create a new masked class.\n return cls._get_masked_cls(args[0])\n else:\n return cls._get_masked_instance(*args, **kwargs)\n else:\n # Otherwise we're a subclass and should just pass information on.\n return super().__new__(cls, *args, **kwargs)\n\n def __init_subclass__(cls, base_cls=None, data_cls=None, **kwargs):\n \"\"\"Register a Masked subclass.\n\n Parameters\n ----------\n base_cls : type, optional\n If given, it is taken to mean that ``cls`` can be used as\n a base for masked versions of all subclasses of ``base_cls``,\n so it is registered as such in ``_base_classes``.\n data_cls : type, optional\n If given, ``cls`` should will be registered as the masked version of\n ``data_cls``. 
Will set the private ``cls._data_cls`` attribute,\n and auto-generate a docstring if not present already.\n **kwargs\n Passed on for possible further initialization by superclasses.\n\n \"\"\"\n if base_cls is not None:\n Masked._base_classes[base_cls] = cls\n\n if data_cls is not None:\n cls._data_cls = data_cls\n cls._masked_classes[data_cls] = cls\n if cls.__doc__ is None:\n cls.__doc__ = get__doc__(data_cls)\n\n super().__init_subclass__(**kwargs)\n\n # This base implementation just uses the class initializer.\n # Subclasses can override this in case the class does not work\n # with this signature, or to provide a faster implementation.\n @classmethod\n def from_unmasked(cls, data, mask=None, copy=False):\n \"\"\"Create an instance from unmasked data and a mask.\"\"\"\n return cls(data, mask=mask, copy=copy)\n\n @classmethod\n def _get_masked_instance(cls, data, mask=None, copy=False):\n data, data_mask = cls._get_data_and_mask(data)\n if mask is None:\n mask = False if data_mask is None else data_mask\n\n masked_cls = cls._get_masked_cls(data.__class__)\n return masked_cls.from_unmasked(data, mask, copy)\n\n @classmethod\n def _get_masked_cls(cls, data_cls):\n \"\"\"Get the masked wrapper for a given data class.\n\n If the data class does not exist yet but is a subclass of any of the\n registered base data classes, it is automatically generated\n (except we skip `~numpy.ma.MaskedArray` subclasses, since then the\n masking mechanisms would interfere).\n \"\"\"\n if issubclass(data_cls, (Masked, np.ma.MaskedArray)):\n return data_cls\n\n masked_cls = cls._masked_classes.get(data_cls)\n if masked_cls is None:\n # Walk through MRO and find closest base data class.\n # Note: right now, will basically always be ndarray, but\n # one could imagine needing some special care for one subclass,\n # which would then get its own entry. 
E.g., if MaskedAngle\n # defined something special, then MaskedLongitude should depend\n # on it.\n for mro_item in data_cls.__mro__:\n base_cls = cls._base_classes.get(mro_item)\n if base_cls is not None:\n break\n else:\n # Just hope that MaskedNDArray can handle it.\n # TODO: this covers the case where a user puts in a list or so,\n # but for those one could just explicitly do something like\n # _masked_classes[list] = MaskedNDArray.\n return MaskedNDArray\n\n # Create (and therefore register) new Masked subclass for the\n # given data_cls.\n masked_cls = type('Masked' + data_cls.__name__,\n (data_cls, base_cls), {}, data_cls=data_cls)\n\n return masked_cls\n\n @classmethod\n def _get_data_and_mask(cls, data, allow_ma_masked=False):\n \"\"\"Split data into unmasked and mask, if present.\n\n Parameters\n ----------\n data : array-like\n Possibly masked item, judged by whether it has a ``mask`` attribute.\n If so, checks for being an instance of `~astropy.utils.masked.Masked`\n or `~numpy.ma.MaskedArray`, and gets unmasked data appropriately.\n allow_ma_masked : bool, optional\n Whether or not to process `~numpy.ma.masked`, i.e., an item that\n implies no data but the presence of a mask.\n\n Returns\n -------\n unmasked, mask : array-like\n Unmasked will be `None` for `~numpy.ma.masked`.\n\n Raises\n ------\n ValueError\n If `~numpy.ma.masked` is passed in and ``allow_ma_masked`` is not set.\n\n \"\"\"\n mask = getattr(data, 'mask', None)\n if mask is not None:\n try:\n data = data.unmasked\n except AttributeError:\n if not isinstance(data, np.ma.MaskedArray):\n raise\n if data is np.ma.masked:\n if allow_ma_masked:\n data = None\n else:\n raise ValueError('cannot handle np.ma.masked here.') from None\n else:\n data = data.data\n\n return data, mask\n\n @classmethod\n def _get_data_and_masks(cls, *args):\n data_masks = [cls._get_data_and_mask(arg) for arg in args]\n return (tuple(data for data, _ in data_masks),\n tuple(mask for _, mask in data_masks))\n\n def 
_get_mask(self):\n \"\"\"The mask.\n\n If set, replace the original mask, with whatever it is set with,\n using a view if no broadcasting or type conversion is required.\n \"\"\"\n return self._mask\n\n def _set_mask(self, mask, copy=False):\n self_dtype = getattr(self, 'dtype', None)\n mask_dtype = (np.ma.make_mask_descr(self_dtype)\n if self_dtype and self_dtype.names else np.dtype('?'))\n ma = np.asanyarray(mask, dtype=mask_dtype)\n if ma.shape != self.shape:\n # This will fail (correctly) if not broadcastable.\n self._mask = np.empty(self.shape, dtype=mask_dtype)\n self._mask[...] = ma\n elif ma is mask:\n # Even if not copying use a view so that shape setting\n # does not propagate.\n self._mask = mask.copy() if copy else mask.view()\n else:\n self._mask = ma\n\n mask = property(_get_mask, _set_mask)\n\n # Note: subclass should generally override the unmasked property.\n # This one assumes the unmasked data is stored in a private attribute.\n @property\n def unmasked(self):\n \"\"\"The unmasked values.\n\n See Also\n --------\n astropy.utils.masked.Masked.filled\n \"\"\"\n return self._unmasked\n\n def filled(self, fill_value):\n \"\"\"Get a copy of the underlying data, with masked values filled in.\n\n Parameters\n ----------\n fill_value : object\n Value to replace masked values with.\n\n See Also\n --------\n astropy.utils.masked.Masked.unmasked\n \"\"\"\n unmasked = self.unmasked.copy()\n if self.mask.dtype.names:\n np.ma.core._recursive_filled(unmasked, self.mask, fill_value)\n else:\n unmasked[self.mask] = fill_value\n\n return unmasked\n\n def _apply(self, method, *args, **kwargs):\n # Required method for NDArrayShapeMethods, to help provide __getitem__\n # and shape-changing methods.\n if callable(method):\n data = method(self.unmasked, *args, **kwargs)\n mask = method(self.mask, *args, **kwargs)\n else:\n data = getattr(self.unmasked, method)(*args, **kwargs)\n mask = getattr(self.mask, method)(*args, **kwargs)\n\n result = self.from_unmasked(data, 
mask, copy=False)\n if 'info' in self.__dict__:\n result.info = self.info\n\n return result\n\n def __setitem__(self, item, value):\n value, mask = self._get_data_and_mask(value, allow_ma_masked=True)\n if value is not None:\n self.unmasked[item] = value\n self.mask[item] = mask\n\n\nclass MaskedInfoBase:\n mask_val = np.ma.masked\n\n def __init__(self, bound=False):\n super().__init__(bound)\n\n # If bound to a data object instance then create the dict of attributes\n # which stores the info attribute values.\n if bound:\n # Specify how to serialize this object depending on context.\n self.serialize_method = {'fits': 'null_value',\n 'ecsv': 'null_value',\n 'hdf5': 'data_mask',\n 'parquet': 'data_mask',\n None: 'null_value'}\n\n\nclass MaskedNDArrayInfo(MaskedInfoBase, ParentDtypeInfo):\n \"\"\"\n Container for meta information like name, description, format.\n \"\"\"\n\n # Add `serialize_method` attribute to the attrs that MaskedNDArrayInfo knows\n # about. This allows customization of the way that MaskedColumn objects\n # get written to file depending on format. The default is to use whatever\n # the writer would normally do, which in the case of FITS or ECSV is to use\n # a NULL value within the data itself. If serialize_method is 'data_mask'\n # then the mask is explicitly written out as a separate column if there\n # are any masked values. This is the same as for MaskedColumn.\n attr_names = ParentDtypeInfo.attr_names | {'serialize_method'}\n\n # When `serialize_method` is 'data_mask', and data and mask are being written\n # as separate columns, use column names and .mask (instead\n # of default encoding as .data and .mask).\n _represent_as_dict_primary_data = 'data'\n\n def _represent_as_dict(self):\n out = super()._represent_as_dict()\n\n masked_array = self._parent\n\n # If the serialize method for this context (e.g. 
'fits' or 'ecsv') is\n # 'data_mask', that means to serialize using an explicit mask column.\n method = self.serialize_method[self._serialize_context]\n\n if method == 'data_mask':\n out['data'] = masked_array.unmasked\n\n if np.any(masked_array.mask):\n # Only if there are actually masked elements do we add the ``mask`` column\n out['mask'] = masked_array.mask\n\n elif method == 'null_value':\n out['data'] = np.ma.MaskedArray(masked_array.unmasked,\n mask=masked_array.mask)\n\n else:\n raise ValueError('serialize method must be either \"data_mask\" or \"null_value\"')\n\n return out\n\n def _construct_from_dict(self, map):\n # Override usual handling, since MaskedNDArray takes shape and buffer\n # as input, which is less useful here.\n # The map can contain either a MaskedColumn or a Column and a mask.\n # Extract the mask for the former case.\n map.setdefault('mask', getattr(map['data'], 'mask', False))\n return self._parent_cls.from_unmasked(**map)\n\n\nclass MaskedArraySubclassInfo(MaskedInfoBase):\n \"\"\"Mixin class to create a subclasses such as MaskedQuantityInfo.\"\"\"\n # This is used below in __init_subclass__, which also inserts a\n # 'serialize_method' attribute in attr_names.\n\n def _represent_as_dict(self):\n # Use the data_cls as the class name for serialization,\n # so that we do not have to store all possible masked classes\n # in astropy.table.serialize.__construct_mixin_classes.\n out = super()._represent_as_dict()\n data_cls = self._parent._data_cls\n out.setdefault('__class__',\n data_cls.__module__ + '.' 
+ data_cls.__name__)\n return out\n\n\ndef _comparison_method(op):\n \"\"\"\n Create a comparison operator for MaskedNDArray.\n\n Needed since for string dtypes the base operators bypass __array_ufunc__\n and hence return unmasked results.\n \"\"\"\n def _compare(self, other):\n other_data, other_mask = self._get_data_and_mask(other)\n result = getattr(self.unmasked, op)(other_data)\n if result is NotImplemented:\n return NotImplemented\n mask = self.mask | (other_mask if other_mask is not None else False)\n return self._masked_result(result, mask, None)\n\n return _compare\n\n\nclass MaskedIterator:\n \"\"\"\n Flat iterator object to iterate over Masked Arrays.\n\n A `~astropy.utils.masked.MaskedIterator` iterator is returned by ``m.flat``\n for any masked array ``m``. It allows iterating over the array as if it\n were a 1-D array, either in a for-loop or by calling its `next` method.\n\n Iteration is done in C-contiguous style, with the last index varying the\n fastest. The iterator can also be indexed using basic slicing or\n advanced indexing.\n\n Notes\n -----\n The design of `~astropy.utils.masked.MaskedIterator` follows that of\n `~numpy.ma.core.MaskedIterator`. It is not exported by the\n `~astropy.utils.masked` module. 
Instead of instantiating directly,\n use the ``flat`` method in the masked array instance.\n \"\"\"\n\n def __init__(self, m):\n self._masked = m\n self._dataiter = m.unmasked.flat\n self._maskiter = m.mask.flat\n\n def __iter__(self):\n return self\n\n def __getitem__(self, indx):\n out = self._dataiter.__getitem__(indx)\n mask = self._maskiter.__getitem__(indx)\n # For single elements, ndarray.flat.__getitem__ returns scalars; these\n # need a new view as a Masked array.\n if not isinstance(out, np.ndarray):\n out = out[...]\n mask = mask[...]\n\n return self._masked.from_unmasked(out, mask, copy=False)\n\n def __setitem__(self, index, value):\n data, mask = self._masked._get_data_and_mask(value, allow_ma_masked=True)\n if data is not None:\n self._dataiter[index] = data\n self._maskiter[index] = mask\n\n def __next__(self):\n \"\"\"\n Return the next value, or raise StopIteration.\n \"\"\"\n out = next(self._dataiter)[...]\n mask = next(self._maskiter)[...]\n return self._masked.from_unmasked(out, mask, copy=False)\n\n next = __next__\n\n\nclass MaskedNDArray(Masked, np.ndarray, base_cls=np.ndarray, data_cls=np.ndarray):\n _mask = None\n\n info = MaskedNDArrayInfo()\n\n def __new__(cls, *args, mask=None, **kwargs):\n \"\"\"Get data class instance from arguments and then set mask.\"\"\"\n self = super().__new__(cls, *args, **kwargs)\n if mask is not None:\n self.mask = mask\n elif self._mask is None:\n self.mask = False\n return self\n\n def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(cls, **kwargs)\n # For all subclasses we should set a default __new__ that passes on\n # arguments other than mask to the data class, and then sets the mask.\n if '__new__' not in cls.__dict__:\n def __new__(newcls, *args, mask=None, **kwargs):\n \"\"\"Get data class instance from arguments and then set mask.\"\"\"\n # Need to explicitly mention classes outside of class definition.\n self = super(cls, newcls).__new__(newcls, *args, **kwargs)\n if mask is not 
None:\n self.mask = mask\n elif self._mask is None:\n self.mask = False\n return self\n cls.__new__ = __new__\n\n if 'info' not in cls.__dict__ and hasattr(cls._data_cls, 'info'):\n data_info = cls._data_cls.info\n attr_names = data_info.attr_names | {'serialize_method'}\n new_info = type(cls.__name__+'Info',\n (MaskedArraySubclassInfo, data_info.__class__),\n dict(attr_names=attr_names))\n cls.info = new_info()\n\n # The two pieces typically overridden.\n @classmethod\n def from_unmasked(cls, data, mask=None, copy=False):\n # Note: have to override since __new__ would use ndarray.__new__\n # which expects the shape as its first argument, not an array.\n data = np.array(data, subok=True, copy=copy)\n self = data.view(cls)\n self._set_mask(mask, copy=copy)\n return self\n\n @property\n def unmasked(self):\n return super().view(self._data_cls)\n\n @classmethod\n def _get_masked_cls(cls, data_cls):\n # Short-cuts\n if data_cls is np.ndarray:\n return MaskedNDArray\n elif data_cls is None: # for .view()\n return cls\n\n return super()._get_masked_cls(data_cls)\n\n @property\n def flat(self):\n \"\"\"A 1-D iterator over the Masked array.\n\n This returns a ``MaskedIterator`` instance, which behaves the same\n as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,\n and is similar to Python's built-in iterator, except that it also\n allows assignment.\n \"\"\"\n return MaskedIterator(self)\n\n @property\n def _baseclass(self):\n \"\"\"Work-around for MaskedArray initialization.\n\n Allows the base class to be inferred correctly when a masked instance\n is used to initialize (or viewed as) a `~numpy.ma.MaskedArray`.\n\n \"\"\"\n return self._data_cls\n\n def view(self, dtype=None, type=None):\n \"\"\"New view of the masked array.\n\n Like `numpy.ndarray.view`, but always returning a masked array subclass.\n \"\"\"\n if type is None and (isinstance(dtype, builtins.type)\n and issubclass(dtype, np.ndarray)):\n return 
super().view(self._get_masked_cls(dtype))\n\n if dtype is None:\n return super().view(self._get_masked_cls(type))\n\n dtype = np.dtype(dtype)\n if not (dtype.itemsize == self.dtype.itemsize\n and (dtype.names is None\n or len(dtype.names) == len(self.dtype.names))):\n raise NotImplementedError(\n f\"{self.__class__} cannot be viewed with a dtype with a \"\n f\"with a different number of fields or size.\")\n\n return super().view(dtype, self._get_masked_cls(type))\n\n def __array_finalize__(self, obj):\n # If we're a new object or viewing an ndarray, nothing has to be done.\n if obj is None or obj.__class__ is np.ndarray:\n return\n\n # Logically, this should come from ndarray and hence be None, but\n # just in case someone creates a new mixin, we check.\n super_array_finalize = super().__array_finalize__\n if super_array_finalize: # pragma: no cover\n super_array_finalize(obj)\n\n if self._mask is None:\n # Got here after, e.g., a view of another masked class.\n # Get its mask, or initialize ours.\n self._set_mask(getattr(obj, '_mask', False))\n\n if 'info' in obj.__dict__:\n self.info = obj.info\n\n @property\n def shape(self):\n \"\"\"The shape of the data and the mask.\n\n Usually used to get the current shape of an array, but may also be\n used to reshape the array in-place by assigning a tuple of array\n dimensions to it. 
As with `numpy.reshape`, one of the new shape\n dimensions can be -1, in which case its value is inferred from the\n size of the array and the remaining dimensions.\n\n Raises\n ------\n AttributeError\n If a copy is required, of either the data or the mask.\n\n \"\"\"\n # Redefinition to allow defining a setter and add a docstring.\n return super().shape\n\n @shape.setter\n def shape(self, shape):\n old_shape = self.shape\n self._mask.shape = shape\n # Reshape array proper in try/except just in case some broadcasting\n # or so causes it to fail.\n try:\n super(MaskedNDArray, type(self)).shape.__set__(self, shape)\n except Exception as exc:\n self._mask.shape = old_shape\n # Given that the mask reshaping succeeded, the only logical\n # reason for an exception is something like a broadcast error in\n # in __array_finalize__, or a different memory ordering between\n # mask and data. For those, give a more useful error message;\n # otherwise just raise the error.\n if 'could not broadcast' in exc.args[0]:\n raise AttributeError(\n 'Incompatible shape for in-place modification. 
'\n 'Use `.reshape()` to make a copy with the desired '\n 'shape.') from None\n else: # pragma: no cover\n raise\n\n _eq_simple = _comparison_method('__eq__')\n _ne_simple = _comparison_method('__ne__')\n __lt__ = _comparison_method('__lt__')\n __le__ = _comparison_method('__le__')\n __gt__ = _comparison_method('__gt__')\n __ge__ = _comparison_method('__ge__')\n\n def __eq__(self, other):\n if not self.dtype.names:\n return self._eq_simple(other)\n\n # For structured arrays, we treat this as a reduction over the fields,\n # where masked fields are skipped and thus do not influence the result.\n other = np.asanyarray(other, dtype=self.dtype)\n result = np.stack([self[field] == other[field]\n for field in self.dtype.names], axis=-1)\n return result.all(axis=-1)\n\n def __ne__(self, other):\n if not self.dtype.names:\n return self._ne_simple(other)\n\n # For structured arrays, we treat this as a reduction over the fields,\n # where masked fields are skipped and thus do not influence the result.\n other = np.asanyarray(other, dtype=self.dtype)\n result = np.stack([self[field] != other[field]\n for field in self.dtype.names], axis=-1)\n return result.any(axis=-1)\n\n def _combine_masks(self, masks, out=None):\n masks = [m for m in masks if m is not None and m is not False]\n if not masks:\n return False\n if len(masks) == 1:\n if out is None:\n return masks[0].copy()\n else:\n np.copyto(out, masks[0])\n return out\n\n out = np.logical_or(masks[0], masks[1], out=out)\n for mask in masks[2:]:\n np.logical_or(out, mask, out=out)\n return out\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n out = kwargs.pop('out', None)\n out_unmasked = None\n out_mask = None\n if out is not None:\n out_unmasked, out_masks = self._get_data_and_masks(*out)\n for d, m in zip(out_unmasked, out_masks):\n if m is None:\n # TODO: allow writing to unmasked output if nothing is masked?\n if d is not None:\n raise TypeError('cannot write to unmasked output')\n elif out_mask is 
None:\n out_mask = m\n\n unmasked, masks = self._get_data_and_masks(*inputs)\n\n if ufunc.signature:\n # We're dealing with a gufunc. For now, only deal with\n # np.matmul and gufuncs for which the mask of any output always\n # depends on all core dimension values of all inputs.\n # Also ignore axes keyword for now...\n # TODO: in principle, it should be possible to generate the mask\n # purely based on the signature.\n if 'axes' in kwargs:\n raise NotImplementedError(\"Masked does not yet support gufunc \"\n \"calls with 'axes'.\")\n if ufunc is np.matmul:\n # np.matmul is tricky and its signature cannot be parsed by\n # _parse_gufunc_signature.\n unmasked = np.atleast_1d(*unmasked)\n mask0, mask1 = masks\n masks = []\n is_mat1 = unmasked[1].ndim >= 2\n if mask0 is not None:\n masks.append(\n np.logical_or.reduce(mask0, axis=-1, keepdims=is_mat1))\n\n if mask1 is not None:\n masks.append(\n np.logical_or.reduce(mask1, axis=-2, keepdims=True)\n if is_mat1 else\n np.logical_or.reduce(mask1))\n\n mask = self._combine_masks(masks, out=out_mask)\n\n else:\n # Parse signature with private numpy function. Note it\n # cannot handle spaces in tuples, so remove those.\n in_sig, out_sig = np.lib.function_base._parse_gufunc_signature(\n ufunc.signature.replace(' ', ''))\n axis = kwargs.get('axis', -1)\n keepdims = kwargs.get('keepdims', False)\n in_masks = []\n for sig, mask in zip(in_sig, masks):\n if mask is not None:\n if sig:\n # Input has core dimensions. Assume that if any\n # value in those is masked, the output will be\n # masked too (TODO: for multiple core dimensions\n # this may be too strong).\n mask = np.logical_or.reduce(\n mask, axis=axis, keepdims=keepdims)\n in_masks.append(mask)\n\n mask = self._combine_masks(in_masks)\n result_masks = []\n for os in out_sig:\n if os:\n # Output has core dimensions. 
Assume all those\n # get the same mask.\n result_mask = np.expand_dims(mask, axis)\n else:\n result_mask = mask\n result_masks.append(result_mask)\n\n mask = result_masks if len(result_masks) > 1 else result_masks[0]\n\n elif method == '__call__':\n # Regular ufunc call.\n mask = self._combine_masks(masks, out=out_mask)\n\n elif method == 'outer':\n # Must have two arguments; adjust masks as will be done for data.\n assert len(masks) == 2\n masks = [(m if m is not None else False) for m in masks]\n mask = np.logical_or.outer(masks[0], masks[1], out=out_mask)\n\n elif method in {'reduce', 'accumulate'}:\n # Reductions like np.add.reduce (sum).\n if masks[0] is not None:\n # By default, we simply propagate masks, since for\n # things like np.sum, it makes no sense to do otherwise.\n # Individual methods need to override as needed.\n # TODO: take care of 'out' too?\n if method == 'reduce':\n axis = kwargs.get('axis', None)\n keepdims = kwargs.get('keepdims', False)\n where = kwargs.get('where', True)\n mask = np.logical_or.reduce(masks[0], where=where,\n axis=axis, keepdims=keepdims,\n out=out_mask)\n if where is not True:\n # Mask also whole rows that were not selected by where,\n # so would have been left as unmasked above.\n mask |= np.logical_and.reduce(masks[0], where=where,\n axis=axis, keepdims=keepdims)\n\n else:\n # Accumulate\n axis = kwargs.get('axis', 0)\n mask = np.logical_or.accumulate(masks[0], axis=axis,\n out=out_mask)\n\n elif out is not None:\n mask = False\n\n else: # pragma: no cover\n # Can only get here if neither input nor output was masked, but\n # perhaps axis or where was masked (in numpy < 1.21 this is\n # possible). 
We don't support this.\n return NotImplemented\n\n elif method in {'reduceat', 'at'}: # pragma: no cover\n # TODO: implement things like np.add.accumulate (used for cumsum).\n raise NotImplementedError(\"masked instances cannot yet deal with \"\n \"'reduceat' or 'at'.\")\n\n if out_unmasked is not None:\n kwargs['out'] = out_unmasked\n result = getattr(ufunc, method)(*unmasked, **kwargs)\n\n if result is None: # pragma: no cover\n # This happens for the \"at\" method.\n return result\n\n if out is not None and len(out) == 1:\n out = out[0]\n return self._masked_result(result, mask, out)\n\n def __array_function__(self, function, types, args, kwargs):\n # TODO: go through functions systematically to see which ones\n # work and/or can be supported.\n if function in MASKED_SAFE_FUNCTIONS:\n return super().__array_function__(function, types, args, kwargs)\n\n elif function in APPLY_TO_BOTH_FUNCTIONS:\n helper = APPLY_TO_BOTH_FUNCTIONS[function]\n try:\n helper_result = helper(*args, **kwargs)\n except NotImplementedError:\n return self._not_implemented_or_raise(function, types)\n\n data_args, mask_args, kwargs, out = helper_result\n if out is not None:\n if not isinstance(out, Masked):\n return self._not_implemented_or_raise(function, types)\n function(*mask_args, out=out.mask, **kwargs)\n function(*data_args, out=out.unmasked, **kwargs)\n return out\n\n mask = function(*mask_args, **kwargs)\n result = function(*data_args, **kwargs)\n\n elif function in DISPATCHED_FUNCTIONS:\n dispatched_function = DISPATCHED_FUNCTIONS[function]\n try:\n dispatched_result = dispatched_function(*args, **kwargs)\n except NotImplementedError:\n return self._not_implemented_or_raise(function, types)\n\n if not isinstance(dispatched_result, tuple):\n return dispatched_result\n\n result, mask, out = dispatched_result\n\n elif function in UNSUPPORTED_FUNCTIONS:\n return NotImplemented\n\n else: # pragma: no cover\n # By default, just pass it through for now.\n return 
super().__array_function__(function, types, args, kwargs)\n\n if mask is None:\n return result\n else:\n return self._masked_result(result, mask, out)\n\n def _not_implemented_or_raise(self, function, types):\n # Our function helper or dispatcher found that the function does not\n # work with Masked. In principle, there may be another class that\n # knows what to do with us, for which we should return NotImplemented.\n # But if there is ndarray (or a non-Masked subclass of it) around,\n # it quite likely coerces, so we should just break.\n if any(issubclass(t, np.ndarray) and not issubclass(t, Masked)\n for t in types):\n raise TypeError(\"the MaskedNDArray implementation cannot handle {} \"\n \"with the given arguments.\"\n .format(function)) from None\n else:\n return NotImplemented\n\n def _masked_result(self, result, mask, out):\n if isinstance(result, tuple):\n if out is None:\n out = (None,) * len(result)\n if not isinstance(mask, (list, tuple)):\n mask = (mask,) * len(result)\n return tuple(self._masked_result(result_, mask_, out_)\n for (result_, mask_, out_) in zip(result, mask, out))\n\n if out is None:\n # Note that we cannot count on result being the same class as\n # 'self' (e.g., comparison of quantity results in an ndarray, most\n # operations on Longitude and Latitude result in Angle or\n # Quantity), so use Masked to determine the appropriate class.\n return Masked(result, mask)\n\n # TODO: remove this sanity check once test cases are more complete.\n assert isinstance(out, Masked)\n # If we have an output, the result was written in-place, so we should\n # also write the mask in-place (if not done already in the code).\n if out._mask is not mask:\n out._mask[...] 
= mask\n return out\n\n # Below are ndarray methods that need to be overridden as masked elements\n # need to be skipped and/or an initial value needs to be set.\n def _reduce_defaults(self, kwargs, initial_func=None):\n \"\"\"Get default where and initial for masked reductions.\n\n Generally, the default should be to skip all masked elements. For\n reductions such as np.minimum.reduce, we also need an initial value,\n which can be determined using ``initial_func``.\n\n \"\"\"\n if 'where' not in kwargs:\n kwargs['where'] = ~self.mask\n if initial_func is not None and 'initial' not in kwargs:\n kwargs['initial'] = initial_func(self.unmasked)\n return kwargs\n\n def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):\n # Unfortunately, cannot override the call to diagonal inside trace, so\n # duplicate implementation in numpy/core/src/multiarray/calculation.c.\n diagonal = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)\n return diagonal.sum(-1, dtype=dtype, out=out)\n\n def min(self, axis=None, out=None, **kwargs):\n return super().min(axis=axis, out=out,\n **self._reduce_defaults(kwargs, np.nanmax))\n\n def max(self, axis=None, out=None, **kwargs):\n return super().max(axis=axis, out=out,\n **self._reduce_defaults(kwargs, np.nanmin))\n\n def nonzero(self):\n unmasked_nonzero = self.unmasked.nonzero()\n if self.ndim >= 1:\n not_masked = ~self.mask[unmasked_nonzero]\n return tuple(u[not_masked] for u in unmasked_nonzero)\n else:\n return unmasked_nonzero if not self.mask else np.nonzero(0)\n\n def compress(self, condition, axis=None, out=None):\n if out is not None:\n raise NotImplementedError('cannot yet give output')\n return self._apply('compress', condition, axis=axis)\n\n def repeat(self, repeats, axis=None):\n return self._apply('repeat', repeats, axis=axis)\n\n def choose(self, choices, out=None, mode='raise'):\n # Let __array_function__ take care since choices can be masked too.\n return np.choose(self, choices, out=out, mode=mode)\n\n if 
NUMPY_LT_1_22:\n def argmin(self, axis=None, out=None):\n # Todo: should this return a masked integer array, with masks\n # if all elements were masked?\n at_min = self == self.min(axis=axis, keepdims=True)\n return at_min.filled(False).argmax(axis=axis, out=out)\n\n def argmax(self, axis=None, out=None):\n at_max = self == self.max(axis=axis, keepdims=True)\n return at_max.filled(False).argmax(axis=axis, out=out)\n\n else:\n def argmin(self, axis=None, out=None, *, keepdims=False):\n # Todo: should this return a masked integer array, with masks\n # if all elements were masked?\n at_min = self == self.min(axis=axis, keepdims=True)\n return at_min.filled(False).argmax(axis=axis, out=out, keepdims=keepdims)\n\n def argmax(self, axis=None, out=None, *, keepdims=False):\n at_max = self == self.max(axis=axis, keepdims=True)\n return at_max.filled(False).argmax(axis=axis, out=out, keepdims=keepdims)\n\n def argsort(self, axis=-1, kind=None, order=None):\n \"\"\"Returns the indices that would sort an array.\n\n Perform an indirect sort along the given axis on both the array\n and the mask, with masked items being sorted to the end.\n\n Parameters\n ----------\n axis : int or None, optional\n Axis along which to sort. The default is -1 (the last axis).\n If None, the flattened array is used.\n kind : str or None, ignored.\n The kind of sort. Present only to allow subclasses to work.\n order : str or list of str.\n For an array with fields defined, the fields to compare first,\n second, etc. A single field can be specified as a string, and not\n all fields need be specified, but unspecified fields will still be\n used, in dtype order, to break ties.\n\n Returns\n -------\n index_array : ndarray, int\n Array of indices that sorts along the specified ``axis``. 
Use\n ``np.take_along_axis(self, index_array, axis=axis)`` to obtain\n the sorted array.\n\n \"\"\"\n if axis is None:\n data = self.ravel()\n axis = -1\n else:\n data = self\n\n if self.dtype.names:\n # As done inside the argsort implementation in multiarray/methods.c.\n if order is None:\n order = self.dtype.names\n else:\n order = np.core._internal._newnames(self.dtype, order)\n\n keys = tuple(data[name] for name in order[::-1])\n\n elif order is not None:\n raise ValueError('Cannot specify order when the array has no fields.')\n\n else:\n keys = (data,)\n\n return np.lexsort(keys, axis=axis)\n\n def sort(self, axis=-1, kind=None, order=None):\n \"\"\"Sort an array in-place. Refer to `numpy.sort` for full documentation.\"\"\"\n # TODO: probably possible to do this faster than going through argsort!\n indices = self.argsort(axis, kind=kind, order=order)\n self[:] = np.take_along_axis(self, indices, axis=axis)\n\n def argpartition(self, kth, axis=-1, kind='introselect', order=None):\n # TODO: should be possible to do this faster than with a full argsort!\n return self.argsort(axis=axis, order=order)\n\n def partition(self, kth, axis=-1, kind='introselect', order=None):\n # TODO: should be possible to do this faster than with a full argsort!\n return self.sort(axis=axis, order=None)\n\n def cumsum(self, axis=None, dtype=None, out=None):\n if axis is None:\n self = self.ravel()\n axis = 0\n return np.add.accumulate(self, axis=axis, dtype=dtype, out=out)\n\n def cumprod(self, axis=None, dtype=None, out=None):\n if axis is None:\n self = self.ravel()\n axis = 0\n return np.multiply.accumulate(self, axis=axis, dtype=dtype, out=out)\n\n def clip(self, min=None, max=None, out=None, **kwargs):\n \"\"\"Return an array whose values are limited to ``[min, max]``.\n\n Like `~numpy.clip`, but any masked values in ``min`` and ``max``\n are ignored for clipping. 
The mask of the input array is propagated.\n \"\"\"\n # TODO: implement this at the ufunc level.\n dmin, mmin = self._get_data_and_mask(min)\n dmax, mmax = self._get_data_and_mask(max)\n if mmin is None and mmax is None:\n # Fast path for unmasked max, min.\n return super().clip(min, max, out=out, **kwargs)\n\n masked_out = np.positive(self, out=out)\n out = masked_out.unmasked\n if dmin is not None:\n np.maximum(out, dmin, out=out, where=True if mmin is None else ~mmin)\n if dmax is not None:\n np.minimum(out, dmax, out=out, where=True if mmax is None else ~mmax)\n return masked_out\n\n def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):\n # Implementation based on that in numpy/core/_methods.py\n # Cast bool, unsigned int, and int to float64 by default,\n # and do float16 at higher precision.\n is_float16_result = False\n if dtype is None:\n if issubclass(self.dtype.type, (np.integer, np.bool_)):\n dtype = np.dtype('f8')\n elif issubclass(self.dtype.type, np.float16):\n dtype = np.dtype('f4')\n is_float16_result = out is None\n\n where = ~self.mask & where\n\n result = self.sum(axis=axis, dtype=dtype, out=out,\n keepdims=keepdims, where=where)\n n = np.add.reduce(where, axis=axis, keepdims=keepdims)\n result /= n\n if is_float16_result:\n result = result.astype(self.dtype)\n return result\n\n def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True):\n where_final = ~self.mask & where\n\n # Simplified implementation based on that in numpy/core/_methods.py\n n = np.add.reduce(where_final, axis=axis, keepdims=keepdims)[...]\n\n # Cast bool, unsigned int, and int to float64 by default.\n if dtype is None and issubclass(self.dtype.type,\n (np.integer, np.bool_)):\n dtype = np.dtype('f8')\n mean = self.mean(axis=axis, dtype=dtype, keepdims=True, where=where)\n\n x = self - mean\n x *= x.conjugate() # Conjugate just returns x if not complex.\n\n result = x.sum(axis=axis, dtype=dtype, out=out,\n keepdims=keepdims, 
where=where_final)\n n -= ddof\n n = np.maximum(n, 0, out=n)\n result /= n\n result._mask |= (n == 0)\n return result\n\n def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True):\n result = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof,\n keepdims=keepdims, where=where)\n return np.sqrt(result, out=result)\n\n def __bool__(self):\n # First get result from array itself; this will error if not a scalar.\n result = super().__bool__()\n return result and not self.mask\n\n def any(self, axis=None, out=None, keepdims=False, *, where=True):\n return np.logical_or.reduce(self, axis=axis, out=out,\n keepdims=keepdims, where=~self.mask & where)\n\n def all(self, axis=None, out=None, keepdims=False, *, where=True):\n return np.logical_and.reduce(self, axis=axis, out=out,\n keepdims=keepdims, where=~self.mask & where)\n\n # Following overrides needed since somehow the ndarray implementation\n # does not actually call these.\n def __str__(self):\n return np.array_str(self)\n\n def __repr__(self):\n return np.array_repr(self)\n\n def __format__(self, format_spec):\n string = super().__format__(format_spec)\n if self.shape == () and self.mask:\n n = min(3, max(1, len(string)))\n return ' ' * (len(string)-n) + '\\u2014' * n\n else:\n return string\n\n\nclass MaskedRecarray(np.recarray, MaskedNDArray, data_cls=np.recarray):\n # Explicit definition since we need to override some methods.\n\n def __array_finalize__(self, obj):\n # recarray.__array_finalize__ does not do super, so we do it\n # explicitly.\n super().__array_finalize__(obj)\n super(np.recarray, self).__array_finalize__(obj)\n\n # __getattribute__, __setattr__, and field use these somewhat\n # obscrure ndarray methods. 
TODO: override in MaskedNDArray?\n def getfield(self, dtype, offset=0):\n for field, info in self.dtype.fields.items():\n if offset == info[1] and dtype == info[0]:\n return self[field]\n\n raise NotImplementedError('can only get existing field from '\n 'structured dtype.')\n\n def setfield(self, val, dtype, offset=0):\n for field, info in self.dtype.fields.items():\n if offset == info[1] and dtype == info[0]:\n self[field] = val\n return\n\n raise NotImplementedError('can only set existing field from '\n 'structured dtype.')\n"}}},{"rowIdx":1372,"cells":{"hash":{"kind":"string","value":"43016585b39c175366044bee48452701a4fa4191d5b2259b566b5f8572e046a6"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Helpers for letting numpy functions interact with Masked arrays.\n\nThe module supplies helper routines for numpy functions that propagate\nmasks appropriately., for use in the ``__array_function__``\nimplementation of `~astropy.utils.masked.MaskedNDArray`. 
They are not\nvery useful on their own, but the ones with docstrings are included in\nthe documentation so that there is a place to find out how the mask is\ninterpreted.\n\n\"\"\"\nimport numpy as np\n\nfrom astropy.units.quantity_helper.function_helpers import (\n FunctionAssigner)\nfrom astropy.utils.compat import NUMPY_LT_1_19, NUMPY_LT_1_20, NUMPY_LT_1_23\n\n# This module should not really be imported, but we define __all__\n# such that sphinx can typeset the functions with docstrings.\n# The latter are added to __all__ at the end.\n__all__ = ['MASKED_SAFE_FUNCTIONS', 'APPLY_TO_BOTH_FUNCTIONS',\n 'DISPATCHED_FUNCTIONS', 'UNSUPPORTED_FUNCTIONS']\n\n\nMASKED_SAFE_FUNCTIONS = set()\n\"\"\"Set of functions that work fine on Masked classes already.\n\nMost of these internally use `numpy.ufunc` or other functions that\nare already covered.\n\"\"\"\n\nAPPLY_TO_BOTH_FUNCTIONS = {}\n\"\"\"Dict of functions that should apply to both data and mask.\n\nThe `dict` is keyed by the numpy function and the values are functions\nthat take the input arguments of the numpy function and organize these\nfor passing the data and mask to the numpy function.\n\nReturns\n-------\ndata_args : tuple\n Arguments to pass on to the numpy function for the unmasked data.\nmask_args : tuple\n Arguments to pass on to the numpy function for the masked data.\nkwargs : dict\n Keyword arguments to pass on for both unmasked data and mask.\nout : `~astropy.utils.masked.Masked` instance or None\n Optional instance in which to store the output.\n\nRaises\n------\nNotImplementedError\n When an arguments is masked when it should not be or vice versa.\n\"\"\"\n\nDISPATCHED_FUNCTIONS = {}\n\"\"\"Dict of functions that provide the numpy function's functionality.\n\nThese are for more complicated versions where the numpy function itself\ncannot easily be used. 
It should return either the result of the\nfunction, or a tuple consisting of the unmasked result, the mask for the\nresult and a possible output instance.\n\nIt should raise `NotImplementedError` if one of the arguments is masked\nwhen it should not be or vice versa.\n\"\"\"\n\nUNSUPPORTED_FUNCTIONS = set()\n\"\"\"Set of numpy functions that are not supported for masked arrays.\n\nFor most, masked input simply makes no sense, but for others it may have\nbeen lack of time. Issues or PRs for support for functions are welcome.\n\"\"\"\n\n# Almost all from np.core.fromnumeric defer to methods so are OK.\nMASKED_SAFE_FUNCTIONS |= {\n getattr(np, name) for name in np.core.fromnumeric.__all__\n if name not in {'choose', 'put', 'resize', 'searchsorted', 'where', 'alen'}}\n\nMASKED_SAFE_FUNCTIONS |= {\n # built-in from multiarray\n np.may_share_memory, np.can_cast, np.min_scalar_type, np.result_type,\n np.shares_memory,\n # np.core.arrayprint\n np.array_repr,\n # np.core.function_base\n np.linspace, np.logspace, np.geomspace,\n # np.core.numeric\n np.isclose, np.allclose, np.flatnonzero, np.argwhere,\n # np.core.shape_base\n np.atleast_1d, np.atleast_2d, np.atleast_3d, np.stack, np.hstack, np.vstack,\n # np.lib.function_base\n np.average, np.diff, np.extract, np.meshgrid, np.trapz, np.gradient,\n # np.lib.index_tricks\n np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,\n np.fill_diagonal,\n # np.lib.shape_base\n np.column_stack, np.row_stack, np.dstack,\n np.array_split, np.split, np.hsplit, np.vsplit, np.dsplit,\n np.expand_dims, np.apply_along_axis, np.kron, np.tile,\n np.take_along_axis, np.put_along_axis,\n # np.lib.type_check (all but asfarray, nan_to_num)\n np.iscomplexobj, np.isrealobj, np.imag, np.isreal,\n np.real, np.real_if_close, np.common_type,\n # np.lib.ufunclike\n np.fix, np.isneginf, np.isposinf,\n # np.lib.function_base\n np.angle, np.i0,\n}\n\nIGNORED_FUNCTIONS = {\n # I/O - useless for Masked, since no way to store the mask.\n np.save, 
np.savez, np.savetxt, np.savez_compressed,\n # Polynomials\n np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,\n np.polymul, np.polysub, np.polyval, np.roots, np.vander}\nif NUMPY_LT_1_20:\n # financial\n IGNORED_FUNCTIONS |= {np.fv, np.ipmt, np.irr, np.mirr, np.nper,\n np.npv, np.pmt, np.ppmt, np.pv, np.rate}\n\n# TODO: some of the following could in principle be supported.\nIGNORED_FUNCTIONS |= {\n np.pad,\n np.searchsorted, np.digitize,\n np.is_busday, np.busday_count, np.busday_offset,\n # numpy.lib.function_base\n np.cov, np.corrcoef, np.trim_zeros,\n # numpy.core.numeric\n np.correlate, np.convolve,\n # numpy.lib.histograms\n np.histogram, np.histogram2d, np.histogramdd, np.histogram_bin_edges,\n # TODO!!\n np.dot, np.vdot, np.inner, np.tensordot, np.cross,\n np.einsum, np.einsum_path,\n}\n\n# Really should do these...\nIGNORED_FUNCTIONS |= {getattr(np, setopsname) for setopsname in np.lib.arraysetops.__all__}\n\n\nif NUMPY_LT_1_23:\n IGNORED_FUNCTIONS |= {\n # Deprecated, removed in numpy 1.23\n np.asscalar, np.alen,\n }\n\n# Explicitly unsupported functions\nUNSUPPORTED_FUNCTIONS |= {\n np.unravel_index, np.ravel_multi_index, np.ix_,\n}\n\n# No support for the functions also not supported by Quantity\n# (io, polynomial, etc.).\nUNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS\n\n\napply_to_both = FunctionAssigner(APPLY_TO_BOTH_FUNCTIONS)\ndispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)\n\n\ndef _get_data_and_masks(*args):\n \"\"\"Separate out arguments into tuples of data and masks.\n\n An all-False mask is created if an argument does not have a mask.\n \"\"\"\n from .core import Masked\n\n data, masks = Masked._get_data_and_masks(*args)\n masks = tuple(m if m is not None else np.zeros(np.shape(d), bool)\n for d, m in zip(data, masks))\n return data, masks\n\n\n# Following are simple ufunc-like functions which should just copy the mask.\n@dispatched_function\ndef datetime_as_string(arr, *args, **kwargs):\n return 
(np.datetime_as_string(arr.unmasked, *args, **kwargs),\n arr.mask.copy(), None)\n\n\n@dispatched_function\ndef sinc(x):\n return np.sinc(x.unmasked), x.mask.copy(), None\n\n\n@dispatched_function\ndef iscomplex(x):\n return np.iscomplex(x.unmasked), x.mask.copy(), None\n\n\n@dispatched_function\ndef unwrap(p, *args, **kwargs):\n return np.unwrap(p.unmasked, *args, **kwargs), p.mask.copy(), None\n\n\n@dispatched_function\ndef nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):\n data = np.nan_to_num(x.unmasked, copy=copy,\n nan=nan, posinf=posinf, neginf=neginf)\n return (data, x.mask.copy(), None) if copy else x\n\n\n# Following are simple functions related to shapes, where the same function\n# should be applied to the data and the mask. They cannot all share the\n# same helper, because the first arguments have different names.\n@apply_to_both(helps={\n np.copy, np.asfarray, np.resize, np.moveaxis, np.rollaxis, np.roll})\ndef masked_a_helper(a, *args, **kwargs):\n data, mask = _get_data_and_masks(a)\n return data + args, mask + args, kwargs, None\n\n\n@apply_to_both(helps={np.flip, np.flipud, np.fliplr, np.rot90, np.triu, np.tril})\ndef masked_m_helper(m, *args, **kwargs):\n data, mask = _get_data_and_masks(m)\n return data + args, mask + args, kwargs, None\n\n\n@apply_to_both(helps={np.diag, np.diagflat})\ndef masked_v_helper(v, *args, **kwargs):\n data, mask = _get_data_and_masks(v)\n return data + args, mask + args, kwargs, None\n\n\n@apply_to_both(helps={np.delete})\ndef masked_arr_helper(array, *args, **kwargs):\n data, mask = _get_data_and_masks(array)\n return data + args, mask + args, kwargs, None\n\n\n@apply_to_both\ndef broadcast_to(array, shape, subok=False):\n \"\"\"Broadcast array to the given shape.\n\n Like `numpy.broadcast_to`, and applied to both unmasked data and mask.\n Note that ``subok`` is taken to mean whether or not subclasses of\n the unmasked data and mask are allowed, i.e., for ``subok=False``,\n a 
`~astropy.utils.masked.MaskedNDArray` will be returned.\n \"\"\"\n data, mask = _get_data_and_masks(array)\n return data, mask, dict(shape=shape, subok=subok), None\n\n\n@dispatched_function\ndef outer(a, b, out=None):\n return np.multiply.outer(np.ravel(a), np.ravel(b), out=out)\n\n\n@dispatched_function\ndef empty_like(prototype, dtype=None, order='K', subok=True, shape=None):\n \"\"\"Return a new array with the same shape and type as a given array.\n\n Like `numpy.empty_like`, but will add an empty mask.\n \"\"\"\n unmasked = np.empty_like(prototype.unmasked, dtype=dtype, order=order,\n subok=subok, shape=shape)\n if dtype is not None:\n dtype = (np.ma.make_mask_descr(unmasked.dtype)\n if unmasked.dtype.names else np.dtype('?'))\n mask = np.empty_like(prototype.mask, dtype=dtype, order=order,\n subok=subok, shape=shape)\n\n return unmasked, mask, None\n\n\n@dispatched_function\ndef zeros_like(a, dtype=None, order='K', subok=True, shape=None):\n \"\"\"Return an array of zeros with the same shape and type as a given array.\n\n Like `numpy.zeros_like`, but will add an all-false mask.\n \"\"\"\n unmasked = np.zeros_like(a.unmasked, dtype=dtype, order=order,\n subok=subok, shape=shape)\n return unmasked, False, None\n\n\n@dispatched_function\ndef ones_like(a, dtype=None, order='K', subok=True, shape=None):\n \"\"\"Return an array of ones with the same shape and type as a given array.\n\n Like `numpy.ones_like`, but will add an all-false mask.\n \"\"\"\n unmasked = np.ones_like(a.unmasked, dtype=dtype, order=order,\n subok=subok, shape=shape)\n return unmasked, False, None\n\n\n@dispatched_function\ndef full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):\n \"\"\"Return a full array with the same shape and type as a given array.\n\n Like `numpy.full_like`, but with a mask that is also set.\n If ``fill_value`` is `numpy.ma.masked`, the data will be left unset\n (i.e., as created by `numpy.empty_like`).\n \"\"\"\n result = np.empty_like(a, 
dtype=dtype, order=order, subok=subok, shape=shape)\n result[...] = fill_value\n return result\n\n\n@dispatched_function\ndef put(a, ind, v, mode='raise'):\n \"\"\"Replaces specified elements of an array with given values.\n\n Like `numpy.put`, but for masked array ``a`` and possibly masked\n value ``v``. Masked indices ``ind`` are not supported.\n \"\"\"\n from astropy.utils.masked import Masked\n if isinstance(ind, Masked) or not isinstance(a, Masked):\n raise NotImplementedError\n\n v_data, v_mask = a._get_data_and_mask(v)\n if v_data is not None:\n np.put(a.unmasked, ind, v_data, mode=mode)\n # v_mask of None will be correctly interpreted as False.\n np.put(a.mask, ind, v_mask, mode=mode)\n return None\n\n\n@dispatched_function\ndef putmask(a, mask, values):\n \"\"\"Changes elements of an array based on conditional and input values.\n\n Like `numpy.putmask`, but for masked array ``a`` and possibly masked\n ``values``. Masked ``mask`` is not supported.\n \"\"\"\n from astropy.utils.masked import Masked\n if isinstance(mask, Masked) or not isinstance(a, Masked):\n raise NotImplementedError\n\n values_data, values_mask = a._get_data_and_mask(values)\n if values_data is not None:\n np.putmask(a.unmasked, mask, values_data)\n np.putmask(a.mask, mask, values_mask)\n return None\n\n\n@dispatched_function\ndef place(arr, mask, vals):\n \"\"\"Change elements of an array based on conditional and input values.\n\n Like `numpy.place`, but for masked array ``a`` and possibly masked\n ``values``. 
Masked ``mask`` is not supported.\n \"\"\"\n from astropy.utils.masked import Masked\n if isinstance(mask, Masked) or not isinstance(arr, Masked):\n raise NotImplementedError\n\n vals_data, vals_mask = arr._get_data_and_mask(vals)\n if vals_data is not None:\n np.place(arr.unmasked, mask, vals_data)\n np.place(arr.mask, mask, vals_mask)\n return None\n\n\n@dispatched_function\ndef copyto(dst, src, casting='same_kind', where=True):\n \"\"\"Copies values from one array to another, broadcasting as necessary.\n\n Like `numpy.copyto`, but for masked destination ``dst`` and possibly\n masked source ``src``.\n \"\"\"\n from astropy.utils.masked import Masked\n if not isinstance(dst, Masked) or isinstance(where, Masked):\n raise NotImplementedError\n\n src_data, src_mask = dst._get_data_and_mask(src)\n\n if src_data is not None:\n np.copyto(dst.unmasked, src_data, casting=casting, where=where)\n if src_mask is not None:\n np.copyto(dst.mask, src_mask, where=where)\n return None\n\n\n@dispatched_function\ndef packbits(a, *args, **kwargs):\n result = np.packbits(a.unmasked, *args, **kwargs)\n mask = np.packbits(a.mask, *args, **kwargs).astype(bool)\n return result, mask, None\n\n\n@dispatched_function\ndef unpackbits(a, *args, **kwargs):\n result = np.unpackbits(a.unmasked, *args, **kwargs)\n mask = np.zeros(a.shape, dtype='u1')\n mask[a.mask] = 255\n mask = np.unpackbits(mask, *args, **kwargs).astype(bool)\n return result, mask, None\n\n\n@dispatched_function\ndef bincount(x, weights=None, minlength=0):\n \"\"\"Count number of occurrences of each value in array of non-negative ints.\n\n Like `numpy.bincount`, but masked entries in ``x`` will be skipped.\n Any masked entries in ``weights`` will lead the corresponding bin to\n be masked.\n \"\"\"\n from astropy.utils.masked import Masked\n if weights is not None:\n weights = np.asanyarray(weights)\n if isinstance(x, Masked) and x.ndim <= 1:\n # let other dimensions lead to errors.\n if weights is not None and weights.ndim == 
x.ndim:\n weights = weights[~x.mask]\n x = x.unmasked[~x.mask]\n mask = None\n if weights is not None:\n weights, w_mask = Masked._get_data_and_mask(weights)\n if w_mask is not None:\n mask = np.bincount(x, w_mask.astype(int),\n minlength=minlength).astype(bool)\n result = np.bincount(x, weights, minlength=0)\n return result, mask, None\n\n\n@dispatched_function\ndef msort(a):\n result = a.copy()\n result.sort(axis=0)\n return result\n\n\n@dispatched_function\ndef sort_complex(a):\n # Just a copy of function_base.sort_complex, to avoid the asarray.\n b = a.copy()\n b.sort()\n if not issubclass(b.dtype.type, np.complexfloating): # pragma: no cover\n if b.dtype.char in 'bhBH':\n return b.astype('F')\n elif b.dtype.char == 'g':\n return b.astype('G')\n else:\n return b.astype('D')\n else:\n return b\n\n\nif NUMPY_LT_1_20:\n @apply_to_both\n def concatenate(arrays, axis=0, out=None):\n data, masks = _get_data_and_masks(*arrays)\n return (data,), (masks,), dict(axis=axis), out\n\nelse:\n @dispatched_function\n def concatenate(arrays, axis=0, out=None, dtype=None, casting='same_kind'):\n data, masks = _get_data_and_masks(*arrays)\n if out is None:\n return (np.concatenate(data, axis=axis, dtype=dtype, casting=casting),\n np.concatenate(masks, axis=axis),\n None)\n else:\n from astropy.utils.masked import Masked\n if not isinstance(out, Masked):\n raise NotImplementedError\n np.concatenate(masks, out=out.mask, axis=axis)\n np.concatenate(data, out=out.unmasked, axis=axis, dtype=dtype, casting=casting)\n return out\n\n\n@apply_to_both\ndef append(arr, values, axis=None):\n data, masks = _get_data_and_masks(arr, values)\n return data, masks, dict(axis=axis), None\n\n\n@dispatched_function\ndef block(arrays):\n # We need to override block since the numpy implementation can take two\n # different paths, one for concatenation, one for creating a large empty\n # result array in which parts are set. Each assumes array input and\n # cannot be used directly. 
Since it would be very costly to inspect all\n # arrays and then turn them back into a nested list, we just copy here the\n # second implementation, np.core.shape_base._block_slicing, since it is\n # shortest and easiest.\n from astropy.utils.masked import Masked\n (arrays, list_ndim, result_ndim,\n final_size) = np.core.shape_base._block_setup(arrays)\n shape, slices, arrays = np.core.shape_base._block_info_recursion(\n arrays, list_ndim, result_ndim)\n dtype = np.result_type(*[arr.dtype for arr in arrays])\n F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)\n C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)\n order = 'F' if F_order and not C_order else 'C'\n result = Masked(np.empty(shape=shape, dtype=dtype, order=order))\n for the_slice, arr in zip(slices, arrays):\n result[(Ellipsis,) + the_slice] = arr\n return result\n\n\n@dispatched_function\ndef broadcast_arrays(*args, subok=True):\n \"\"\"Broadcast arrays to a common shape.\n\n Like `numpy.broadcast_arrays`, applied to both unmasked data and masks.\n Note that ``subok`` is taken to mean whether or not subclasses of\n the unmasked data and masks are allowed, i.e., for ``subok=False``,\n `~astropy.utils.masked.MaskedNDArray` instances will be returned.\n \"\"\"\n from .core import Masked\n\n are_masked = [isinstance(arg, Masked) for arg in args]\n data = [(arg.unmasked if is_masked else arg)\n for arg, is_masked in zip(args, are_masked)]\n results = np.broadcast_arrays(*data, subok=subok)\n\n shape = results[0].shape if isinstance(results, list) else results.shape\n masks = [(np.broadcast_to(arg.mask, shape, subok=subok)\n if is_masked else None)\n for arg, is_masked in zip(args, are_masked)]\n results = [(Masked(result, mask) if mask is not None else result)\n for (result, mask) in zip(results, masks)]\n return results if len(results) > 1 else results[0]\n\n\n@apply_to_both\ndef insert(arr, obj, values, axis=None):\n \"\"\"Insert values along the given axis before the given indices.\n\n 
Like `numpy.insert` but for possibly masked ``arr`` and ``values``.\n Masked ``obj`` is not supported.\n \"\"\"\n from astropy.utils.masked import Masked\n if isinstance(obj, Masked) or not isinstance(arr, Masked):\n raise NotImplementedError\n\n (arr_data, val_data), (arr_mask, val_mask) = _get_data_and_masks(arr, values)\n return ((arr_data, obj, val_data, axis),\n (arr_mask, obj, val_mask, axis), {}, None)\n\n\nif NUMPY_LT_1_19:\n @dispatched_function\n def count_nonzero(a, axis=None):\n \"\"\"Counts the number of non-zero values in the array ``a``.\n\n Like `numpy.count_nonzero`, with masked values counted as 0 or `False`.\n \"\"\"\n filled = a.filled(np.zeros((), a.dtype))\n return np.count_nonzero(filled, axis)\nelse:\n @dispatched_function\n def count_nonzero(a, axis=None, *, keepdims=False):\n \"\"\"Counts the number of non-zero values in the array ``a``.\n\n Like `numpy.count_nonzero`, with masked values counted as 0 or `False`.\n \"\"\"\n filled = a.filled(np.zeros((), a.dtype))\n return np.count_nonzero(filled, axis, keepdims=keepdims)\n\n\nif NUMPY_LT_1_19:\n def _zeros_like(a, dtype=None, order='K', subok=True, shape=None):\n if shape != ():\n return np.zeros_like(a, dtype=dtype, order=order, subok=subok, shape=shape)\n else:\n return np.zeros_like(a, dtype=dtype, order=order, subok=subok,\n shape=(1,))[0]\nelse:\n _zeros_like = np.zeros_like\n\n\ndef _masked_median_1d(a, overwrite_input):\n # TODO: need an in-place mask-sorting option.\n unmasked = a.unmasked[~a.mask]\n if unmasked.size:\n return a.from_unmasked(\n np.median(unmasked, overwrite_input=overwrite_input))\n else:\n return a.from_unmasked(_zeros_like(a.unmasked, shape=(1,))[0], mask=True)\n\n\ndef _masked_median(a, axis=None, out=None, overwrite_input=False):\n # As for np.nanmedian, but without a fast option as yet.\n if axis is None or a.ndim == 1:\n part = a.ravel()\n result = _masked_median_1d(part, overwrite_input)\n else:\n result = np.apply_along_axis(_masked_median_1d, axis, a, 
overwrite_input)\n if out is not None:\n out[...] = result\n return result\n\n\n@dispatched_function\ndef median(a, axis=None, out=None, overwrite_input=False, keepdims=False):\n from astropy.utils.masked import Masked\n if out is not None and not isinstance(out, Masked):\n raise NotImplementedError\n\n a = Masked(a)\n r, k = np.lib.function_base._ureduce(\n a, func=_masked_median, axis=axis, out=out,\n overwrite_input=overwrite_input)\n return (r.reshape(k) if keepdims else r) if out is None else out\n\n\ndef _masked_quantile_1d(a, q, **kwargs):\n \"\"\"\n Private function for rank 1 arrays. Compute quantile ignoring NaNs.\n See nanpercentile for parameter usage\n \"\"\"\n unmasked = a.unmasked[~a.mask]\n if unmasked.size:\n result = np.lib.function_base._quantile_unchecked(unmasked, q, **kwargs)\n return a.from_unmasked(result)\n else:\n return a.from_unmasked(_zeros_like(a.unmasked, shape=q.shape), True)\n\n\ndef _masked_quantile(a, q, axis=None, out=None, **kwargs):\n # As for np.nanmedian, but without a fast option as yet.\n if axis is None or a.ndim == 1:\n part = a.ravel()\n result = _masked_quantile_1d(part, q, **kwargs)\n else:\n result = np.apply_along_axis(_masked_quantile_1d, axis, a, q, **kwargs)\n # apply_along_axis fills in collapsed axis with results.\n # Move that axis to the beginning to match percentile's\n # convention.\n if q.ndim != 0:\n result = np.moveaxis(result, axis, 0)\n\n if out is not None:\n out[...] 
= result\n return result\n\n\n@dispatched_function\ndef quantile(a, q, axis=None, out=None, **kwargs):\n from astropy.utils.masked import Masked\n if isinstance(q, Masked) or out is not None and not isinstance(out, Masked):\n raise NotImplementedError\n\n a = Masked(a)\n q = np.asanyarray(q)\n if not np.lib.function_base._quantile_is_valid(q):\n raise ValueError(\"Quantiles must be in the range [0, 1]\")\n\n keepdims = kwargs.pop('keepdims', False)\n r, k = np.lib.function_base._ureduce(\n a, func=_masked_quantile, q=q, axis=axis, out=out, **kwargs)\n return (r.reshape(k) if keepdims else r) if out is None else out\n\n\n@dispatched_function\ndef percentile(a, q, *args, **kwargs):\n q = np.true_divide(q, 100)\n return quantile(a, q, *args, **kwargs)\n\n\n@dispatched_function\ndef array_equal(a1, a2, equal_nan=False):\n (a1d, a2d), (a1m, a2m) = _get_data_and_masks(a1, a2)\n if a1d.shape != a2d.shape:\n return False\n\n equal = (a1d == a2d)\n if equal_nan:\n equal |= np.isnan(a1d) & np.isnan(a2d)\n return bool((equal | a1m | a2m).all())\n\n\n@dispatched_function\ndef array_equiv(a1, a2):\n return bool((a1 == a2).all())\n\n\n@dispatched_function\ndef where(condition, *args):\n from astropy.utils.masked import Masked\n if not args:\n return condition.nonzero(), None, None\n\n condition, c_mask = Masked._get_data_and_mask(condition)\n\n data, masks = _get_data_and_masks(*args)\n unmasked = np.where(condition, *data)\n mask = np.where(condition, *masks)\n if c_mask is not None:\n mask |= c_mask\n return Masked(unmasked, mask=mask)\n\n\n@dispatched_function\ndef choose(a, choices, out=None, mode='raise'):\n \"\"\"Construct an array from an index array and a set of arrays to choose from.\n\n Like `numpy.choose`. Masked indices in ``a`` will lead to masked output\n values and underlying data values are ignored if out of bounds (for\n ``mode='raise'``). 
    Any values masked in ``choices`` will be propagated
    if chosen.

    """
    from astropy.utils.masked import Masked

    a_data, a_mask = Masked._get_data_and_mask(a)
    if a_mask is not None and mode == 'raise':
        # Avoid raising on masked indices: fill them with a valid index (0);
        # the corresponding results are masked again below via ``a_mask``.
        a_data = a.filled(fill_value=0)

    kwargs = {'mode': mode}
    if out is not None:
        if not isinstance(out, Masked):
            raise NotImplementedError
        kwargs['out'] = out.unmasked

    data, masks = _get_data_and_masks(*choices)
    data_chosen = np.choose(a_data, data, **kwargs)
    if out is not None:
        # Second np.choose call below writes the chosen masks into out.mask.
        kwargs['out'] = out.mask

    mask_chosen = np.choose(a_data, masks, **kwargs)
    if a_mask is not None:
        # A masked index always yields a masked result.
        mask_chosen |= a_mask

    return Masked(data_chosen, mask_chosen) if out is None else out


@apply_to_both
def select(condlist, choicelist, default=0):
    """Return an array drawn from elements in choicelist, depending on conditions.

    Like `numpy.select`, with any masks in ``choicelist`` propagated.
    Any masks in ``condlist`` are ignored.

    """
    from astropy.utils.masked import Masked

    # Conditions must be plain boolean arrays; strip masks if present.
    condlist = [c.unmasked if isinstance(c, Masked) else c
                for c in condlist]

    data_list, mask_list = _get_data_and_masks(*choicelist)
    # np.ma.masked as default means "masked everywhere not covered".
    default = Masked(default) if default is not np.ma.masked else Masked(0, mask=True)
    # Return (data_args, mask_args, kwargs, out) as expected by @apply_to_both:
    # np.select is run once on the data and once on the masks.
    return ((condlist, data_list, default.unmasked),
            (condlist, mask_list, default.mask), {}, None)


@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
    """Evaluate a piecewise-defined function.

    Like `numpy.piecewise` but for masked input array ``x``.
    Any masks in ``condlist`` are ignored.

    """
    # Copied implementation from numpy.lib.function_base.piecewise,
    # just to ensure output is Masked.
    n2 = len(funclist)
    # undocumented: single condition is promoted to a list of one condition
    if np.isscalar(condlist) or (
            not isinstance(condlist[0], (list, np.ndarray))
            and x.ndim != 0):  # pragma: no cover
        condlist = [condlist]

    condlist = np.array(condlist, dtype=bool)
    n = len(condlist)

    if n == n2 - 1:  # compute the "otherwise" condition.
        condelse = ~np.any(condlist, axis=0, keepdims=True)
        condlist = np.concatenate([condlist, condelse], axis=0)
        n += 1
    elif n != n2:
        raise ValueError(
            f"with {n} condition(s), either {n} or {n + 1} functions are expected"
        )

    # The one real change compared to numpy: zeros_like on the (presumably
    # Masked) input keeps the output Masked as well -- TODO confirm x is
    # always Masked when dispatched here.
    y = np.zeros_like(x)
    where = []
    what = []
    for k in range(n):
        item = funclist[k]
        if not callable(item):
            # Scalar piece: assigned directly below.
            where.append(condlist[k])
            what.append(item)
        else:
            vals = x[condlist[k]]
            if vals.size > 0:
                where.append(condlist[k])
                what.append(item(vals, *args, **kw))

    for item, value in zip(where, what):
        y[item] = value

    return y


@dispatched_function
def interp(x, xp, fp, *args, **kwargs):
    """One-dimensional linear interpolation.

    Like `numpy.interp`, but any masked points in ``xp`` and ``fp``
    are ignored.  Any masked values in ``x`` will still be evaluated,
    but masked on output.
    """
    from astropy.utils.masked import Masked
    xd, xm = Masked._get_data_and_mask(x)
    if isinstance(xp, Masked) or isinstance(fp, Masked):
        (xp, fp), (xpm, fpm) = _get_data_and_masks(xp, fp)
        # Masked points can only be dropped for the regular 1-D case.
        if xp.ndim == fp.ndim == 1:
            # Avoid making arrays 1-D; will just raise below.
            m = xpm | fpm
            xp = xp[~m]
            fp = fp[~m]

    result = np.interp(xd, xp, fp, *args, **kwargs)
    # Masked input points stay masked on output (copy so the result
    # does not share the input's mask).
    return result if xm is None else Masked(result, xm.copy())


@dispatched_function
def lexsort(keys, axis=-1):
    """Perform an indirect stable sort using a sequence of keys.

    Like `numpy.lexsort` but for possibly masked ``keys``.  Masked
    values are sorted towards the end for each key.
    """
    # Sort masks to the end.
    from .core import Masked

    new_keys = []
    for key in keys:
        if isinstance(key, Masked):
            # If there are other keys below, want to be sure that
            # for masked values, those other keys set the order.
            # Hence, replace masked entries with a constant (the first
            # element), and append the mask itself as an extra key so
            # that masked entries still sort last for this key.
            new_key = key.unmasked
            if new_keys and key.mask.any():
                new_key = new_key.copy()
                new_key[key.mask] = new_key.flat[0]
            new_keys.extend([new_key, key.mask])
        else:
            new_keys.append(key)

    return np.lexsort(new_keys, axis=axis)


# NOTE(review): deliberately no docstring here -- the __all__ logic at the
# end of this module exports any dispatched helper that has one.
@dispatched_function
def apply_over_axes(func, a, axes):
    # Copied straight from numpy/lib/shape_base, just to omit its
    # val = asarray(a); if only it had been asanyarray, or just not there
    # since a is assumed to be an array in the next line...
    # Which is what we do here - we can only get here if it is Masked.
    val = a
    N = a.ndim
    if np.array(axes).ndim == 0:
        axes = (axes,)
    for axis in axes:
        if axis < 0:
            axis = N + axis
        args = (val, axis)
        res = func(*args)
        if res.ndim == val.ndim:
            val = res
        else:
            # Reduced a dimension; restore it so the next axis index
            # still refers to the original layout.
            res = np.expand_dims(res, axis)
            if res.ndim == val.ndim:
                val = res
            else:
                raise ValueError("function is not returning "
                                 "an array of the correct shape")

    return val


class MaskedFormat:
    """Formatter for masked array scalars.

    For use in `numpy.array2string`, wrapping the regular formatters such
    that if a value is masked, its formatted string is replaced.

    Typically initialized using the ``from_data`` class method.
    """
    def __init__(self, format_function):
        self.format_function = format_function
        # Special case for structured void and subarray: we need to make all the
        # format functions for the items masked as well.
        # TODO: maybe a separate class is more logical?
        ffs = getattr(format_function, 'format_functions', None)
        if ffs:
            # StructuredVoidFormat: multiple format functions to be changed.
            self.format_function.format_functions = [MaskedFormat(ff) for ff in ffs]

        ff = getattr(format_function, 'format_function', None)
        if ff:
            # SubarrayFormat: change format function for the elements.
            self.format_function.format_function = MaskedFormat(ff)

    def __call__(self, x):
        if x.dtype.names:
            # The replacement of x with a list is needed because the function
            # inside StructuredVoidFormat iterates over x, which works for an
            # np.void but not an array scalar.
            return self.format_function([x[field] for field in x.dtype.names])

        if x.shape:
            # For a subarray pass on the data directly, since the
            # items will be iterated on inside the function.
            return self.format_function(x)

        # Single element: first just typeset it normally, replace with masked
        # string if needed.
        string = self.format_function(x.unmasked[()])
        if x.mask:
            # Strikethrough would be neat, but terminal needs a different
            # formatting than, say, jupyter notebook.
            # return "\x1B[9m"+string+"\x1B[29m"
            # return ''.join(s+'\u0336' for s in string)
            # Instead, right-align 1-3 em-dashes within the normal width.
            n = min(3, max(1, len(string)))
            return ' ' * (len(string)-n) + '\u2014' * n
        else:
            return string

    @classmethod
    def from_data(cls, data, **options):
        # Wrap numpy's own format-function selection for this data/options.
        from numpy.core.arrayprint import _get_format_function
        return cls(_get_format_function(data, **options))


def _array2string(a, options, separator=' ', prefix=""):
    # Mostly copied from numpy.core.arrayprint, except:
    # - The format function is wrapped in a mask-aware class;
    # - Arrays scalars are not cast as arrays.
    from numpy.core.arrayprint import _leading_trailing, _formatArray

    data = np.asarray(a)

    if a.size > options['threshold']:
        summary_insert = "..."
        data = _leading_trailing(data, options['edgeitems'])
    else:
        summary_insert = ""

    # find the right formatting function for the array
    format_function = MaskedFormat.from_data(data, **options)

    # skip over "["
    next_line_prefix = " "
    # skip over array(
    next_line_prefix += " "*len(prefix)

    lst = _formatArray(a, format_function, options['linewidth'],
                       next_line_prefix, separator, options['edgeitems'],
                       summary_insert, options['legacy'])
    return lst


# NOTE(review): no docstring on the dispatched printing helpers below; adding
# one would export them via the __all__ logic at the end of this module.
@dispatched_function
def array2string(a, max_line_width=None, precision=None,
                 suppress_small=None, separator=' ', prefix="",
                 style=np._NoValue, formatter=None, threshold=None,
                 edgeitems=None, sign=None, floatmode=None, suffix=""):
    # Copied from numpy.core.arrayprint, but using _array2string above.
    from numpy.core.arrayprint import _make_options_dict, _format_options

    overrides = _make_options_dict(precision, threshold, edgeitems,
                                   max_line_width, suppress_small, None, None,
                                   sign, formatter, floatmode)
    options = _format_options.copy()
    options.update(overrides)

    options['linewidth'] -= len(suffix)

    # treat as a null array if any of shape elements == 0
    if a.size == 0:
        return "[]"

    return _array2string(a, options, separator, prefix)


@dispatched_function
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    # Override to avoid special treatment of array scalars.
    return array2string(a, max_line_width, precision, suppress_small, ' ', "")


# For the nanfunctions, we just treat any nan as an additional mask.
_nanfunc_fill_values = {'nansum': 0, 'nancumsum': 0,
                        'nanprod': 1, 'nancumprod': 1}


def masked_nanfunc(nanfuncname):
    """Create a mask-aware replacement for the named numpy nan-function.

    The returned function converts NaNs to mask entries and then calls the
    corresponding non-nan numpy function (e.g. ``np.sum`` for ``nansum``),
    filling with 0/1 where a fill value is defined above.
    """
    np_func = getattr(np, nanfuncname[3:])
    fill_value = _nanfunc_fill_values.get(nanfuncname, None)

    def nanfunc(a, *args, **kwargs):
        from astropy.utils.masked import Masked

        a, mask = Masked._get_data_and_mask(a)
        # NaN only makes sense for inexact (float/complex) dtypes.
        if issubclass(a.dtype.type, np.inexact):
            nans = np.isnan(a)
            mask = nans if mask is None else (nans | mask)

        if mask is not None:
            a = Masked(a, mask)
            if fill_value is not None:
                a = a.filled(fill_value)

        return np_func(a, *args, **kwargs)

    # Build the docstring to match which of the three behaviors applies.
    doc = f"Like `numpy.{nanfuncname}`, skipping masked values as well.\n\n"
    if fill_value is not None:
        # sum, cumsum, prod, cumprod
        doc += (f"Masked/NaN values are replaced with {fill_value}. "
                "The output is not masked.")
    elif "arg" in nanfuncname:
        doc += ("No exceptions are raised for fully masked/NaN slices.\n"
                "Instead, these give index 0.")
    else:
        doc += ("No warnings are given for fully masked/NaN slices.\n"
                "Instead, they are masked in the output.")

    nanfunc.__doc__ = doc
    nanfunc.__name__ = nanfuncname

    return nanfunc


# Register a mask-aware replacement for every numpy nan-function.
for nanfuncname in np.lib.nanfunctions.__all__:
    globals()[nanfuncname] = dispatched_function(masked_nanfunc(nanfuncname),
                                                 helps=getattr(np, nanfuncname))


# Add any dispatched or helper function that has a docstring to
# __all__, so they will be typeset by sphinx.  The logic is that for
# those presumably the use of the mask is not entirely obvious.
__all__ += sorted(helper.__name__ for helper in (
    set(APPLY_TO_BOTH_FUNCTIONS.values())
    | set(DISPATCHED_FUNCTIONS.values())) if helper.__doc__)
'1.23')\nNUMPY_LT_1_24 = not minversion(np, '1.24dev0')\n"}}},{"rowIdx":1374,"cells":{"hash":{"kind":"string","value":"484a53b378a55661f1384b2c4f941f8571814023d0bc5ed9d522db31e608c58b"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport pytest\n\nfrom astropy import units as u\nfrom astropy.table import QTable, hstack, vstack, join\n\nfrom astropy.utils.masked import Masked\nfrom astropy.utils.compat.optional_deps import HAS_H5PY\n\nfrom .test_masked import assert_masked_equal\n\n\nFILE_FORMATS = ['ecsv', 'fits']\nif HAS_H5PY:\n FILE_FORMATS.append('h5')\n\n\nclass MaskedArrayTableSetup:\n @classmethod\n def setup_arrays(self):\n self.a = np.array([3., 5., 0.])\n self.mask_a = np.array([True, False, False])\n\n @classmethod\n def setup_class(self):\n self.setup_arrays()\n self.ma = Masked(self.a, mask=self.mask_a)\n self.ma.info.format = '.1f'\n self.t = QTable([self.ma], names=['ma'])\n\n\nclass MaskedQuantityTableSetup(MaskedArrayTableSetup):\n @classmethod\n def setup_arrays(self):\n self.a = np.array([3., 5., 0.]) << u.m\n self.mask_a = np.array([True, False, False])\n\n\nclass TestMaskedArrayTable(MaskedArrayTableSetup):\n def test_table_initialization(self):\n assert_array_equal(self.t['ma'].unmasked, self.a)\n assert_array_equal(self.t['ma'].mask, self.mask_a)\n assert repr(self.t).splitlines()[-3:] == [\n ' ———',\n ' 5.0',\n ' 0.0']\n\n def test_info_basics(self):\n assert self.t['ma'].info.name == 'ma'\n assert 'serialize_method' in self.t['ma'].info.attr_names\n t2 = self.t.copy()\n t2['ma'].info.format = '.2f'\n t2['ma'].info.serialize_method['fits'] = 'nonsense'\n assert repr(t2).splitlines()[-3:] == [\n ' ———',\n ' 5.00',\n ' 0.00']\n # Check that if we slice, things get copied over correctly.\n t3 = t2[:2]\n assert t3['ma'].info.name == 'ma'\n assert t3['ma'].info.format == '.2f'\n assert 'serialize_method' in 
t3['ma'].info.attr_names\n assert t3['ma'].info.serialize_method['fits'] == 'nonsense'\n\n @pytest.mark.parametrize('file_format', FILE_FORMATS)\n def test_table_write(self, file_format, tmpdir):\n name = str(tmpdir.join(f\"a.{file_format}\"))\n kwargs = {}\n if file_format == 'h5':\n kwargs['path'] = 'trial'\n kwargs['serialize_meta'] = True\n\n self.t.write(name, **kwargs)\n t2 = QTable.read(name)\n assert isinstance(t2['ma'], self.ma.__class__)\n assert np.all(t2['ma'] == self.ma)\n assert np.all(t2['ma'].mask == self.mask_a)\n if file_format == 'fits':\n # Imperfect roundtrip through FITS native format description.\n assert self.t['ma'].info.format in t2['ma'].info.format\n else:\n assert t2['ma'].info.format == self.t['ma'].info.format\n\n @pytest.mark.parametrize('serialize_method', ['data_mask', 'null_value'])\n def test_table_write_serialization(self, serialize_method, tmpdir):\n name = str(tmpdir.join(\"test.ecsv\"))\n self.t.write(name, serialize_method=serialize_method)\n with open(name) as fh:\n lines = fh.readlines()\n\n t2 = QTable.read(name)\n assert isinstance(t2['ma'], self.ma.__class__)\n\n if serialize_method == 'data_mask':\n # Will data_mask, we have data and mask columns and should\n # exactly round-trip.\n assert len(lines[-1].split()) == 2\n assert_masked_equal(t2['ma'], self.ma)\n else:\n # With null_value we have just a data column with null values\n # marked, so we lost information on the data below the mask.\n assert len(lines[-1].split()) == 1\n assert np.all(t2['ma'] == self.ma)\n assert np.all(t2['ma'].mask == self.mask_a)\n\n def test_non_existing_serialize_method(self, tmpdir):\n name = str(tmpdir.join('bad.ecsv'))\n with pytest.raises(ValueError, match='serialize method must be'):\n self.t.write(name, serialize_method='bad_serialize_method')\n\n\nclass TestMaskedQuantityTable(TestMaskedArrayTable, MaskedQuantityTableSetup):\n # Runs tests from TestMaskedArrayTable as well as some extra ones.\n def 
test_table_operations_requiring_masking(self):\n t1 = self.t\n t2 = QTable({'ma2': Masked([1, 2] * u.m)})\n t12 = hstack([t1, t2], join_type='outer')\n assert np.all(t12['ma'].mask == [True, False, False])\n # 'ma2' is shorter by one so we expect one True from hstack so length matches\n assert np.all(t12['ma2'].mask == [False, False, True])\n\n t12 = hstack([t1, t2], join_type='inner')\n assert np.all(t12['ma'].mask == [True, False])\n assert np.all(t12['ma2'].mask == [False, False])\n\n # Vstack tables with different column names. In this case we get masked\n # values\n t12 = vstack([t1, t2], join_type='outer')\n # ma ma2\n # m m\n # --- ---\n # —— ——\n # 5.0 ——\n # 0.0 ——\n # —— 1.0\n # —— 2.0\n assert np.all(t12['ma'].mask == [True, False, False, True, True])\n assert np.all(t12['ma2'].mask == [True, True, True, False, False])\n\n def test_table_operations_requiring_masking_auto_promote(self):\n MaskedQuantity = Masked(u.Quantity)\n t1 = QTable({'ma1': [1, 2] * u.m})\n t2 = QTable({'ma2': [3, 4, 5] * u.m})\n t12 = hstack([t1, t2], join_type='outer')\n assert isinstance(t12['ma1'], MaskedQuantity)\n assert np.all(t12['ma1'].mask == [False, False, True])\n assert np.all(t12['ma1'] == [1, 2, 0] * u.m)\n assert not isinstance(t12['ma2'], MaskedQuantity)\n assert isinstance(t12['ma2'], u.Quantity)\n assert np.all(t12['ma2'] == [3, 4, 5] * u.m)\n\n t12 = hstack([t1, t2], join_type='inner')\n assert isinstance(t12['ma1'], u.Quantity)\n assert not isinstance(t12['ma1'], MaskedQuantity)\n assert isinstance(t12['ma2'], u.Quantity)\n assert not isinstance(t12['ma2'], MaskedQuantity)\n\n # Vstack tables with different column names. 
In this case we get masked\n # values\n t12 = vstack([t1, t2], join_type='outer')\n assert np.all(t12['ma1'].mask == [False, False, True, True, True])\n assert np.all(t12['ma2'].mask == [True, True, False, False, False])\n\n t1['a'] = [1, 2]\n t2['a'] = [1, 3, 4]\n t12 = join(t1, t2, join_type='outer')\n assert np.all(t12['ma1'].mask == [False, False, True, True])\n assert np.all(t12['ma2'].mask == [False, True, False, False])\n"}}},{"rowIdx":1375,"cells":{"hash":{"kind":"string","value":"bf12a1c3f784033ff51608312be8b46df5527c0dc24760167bc3f4f5f6e9e6f7"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Test masked class initialization, methods, and operators.\n\nFunctions, including ufuncs, are tested in test_functions.py\n\"\"\"\nimport operator\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport pytest\n\nfrom astropy import units as u\nfrom astropy.units import Quantity\nfrom astropy.coordinates import Longitude\nfrom astropy.utils.masked import Masked, MaskedNDArray\nfrom astropy.utils.compat import NUMPY_LT_1_20, NUMPY_LT_1_22\n\n\ndef assert_masked_equal(a, b):\n assert_array_equal(a.unmasked, b.unmasked)\n assert_array_equal(a.mask, b.mask)\n\n\nVARIOUS_ITEMS = [\n (1, 1),\n slice(None, 1),\n (),\n 1]\n\n\nclass ArraySetup:\n _data_cls = np.ndarray\n\n @classmethod\n def setup_class(self):\n self.a = np.arange(6.).reshape(2, 3)\n self.mask_a = np.array([[True, False, False],\n [False, True, False]])\n self.b = np.array([-3., -2., -1.])\n self.mask_b = np.array([False, True, False])\n self.c = np.array([[0.25], [0.5]])\n self.mask_c = np.array([[False], [True]])\n self.sdt = np.dtype([('a', 'f8'), ('b', 'f8')])\n self.mask_sdt = np.dtype([('a', '?'), ('b', '?')])\n self.sa = np.array([[(1., 2.), (3., 4.)],\n [(11., 12.), (13., 14.)]], dtype=self.sdt)\n self.mask_sa = np.array([[(True, True), (False, False)],\n [(False, True), (True, False)]],\n dtype=self.mask_sdt)\n self.sb = 
np.array([(1., 2.), (-3., 4.)], dtype=self.sdt)\n self.mask_sb = np.array([(True, False), (False, False)],\n dtype=self.mask_sdt)\n self.scdt = np.dtype([('sa', '2f8'), ('sb', 'i8', (2, 2))])\n self.sc = np.array([([1., 2.], [[1, 2], [3, 4]]),\n ([-1., -2.], [[-1, -2], [-3, -4]])],\n dtype=self.scdt)\n self.mask_scdt = np.dtype([('sa', '2?'), ('sb', '?', (2, 2))])\n self.mask_sc = np.array([([True, False], [[False, False],\n [True, True]]),\n ([False, True], [[True, False],\n [False, True]])],\n dtype=self.mask_scdt)\n\n\nclass QuantitySetup(ArraySetup):\n _data_cls = Quantity\n\n @classmethod\n def setup_class(self):\n super().setup_class()\n self.a = Quantity(self.a, u.m)\n self.b = Quantity(self.b, u.cm)\n self.c = Quantity(self.c, u.km)\n self.sa = Quantity(self.sa, u.m, dtype=self.sdt)\n self.sb = Quantity(self.sb, u.cm, dtype=self.sdt)\n\n\nclass LongitudeSetup(ArraySetup):\n _data_cls = Longitude\n\n @classmethod\n def setup_class(self):\n super().setup_class()\n self.a = Longitude(self.a, u.deg)\n self.b = Longitude(self.b, u.deg)\n self.c = Longitude(self.c, u.deg)\n # Note: Longitude does not work on structured arrays, so\n # leaving it as regular array (which just reruns some tests).\n\n\nclass TestMaskedArrayInitialization(ArraySetup):\n def test_simple(self):\n ma = Masked(self.a, mask=self.mask_a)\n assert isinstance(ma, np.ndarray)\n assert isinstance(ma, type(self.a))\n assert isinstance(ma, Masked)\n assert_array_equal(ma.unmasked, self.a)\n assert_array_equal(ma.mask, self.mask_a)\n assert ma.mask is not self.mask_a\n assert np.may_share_memory(ma.mask, self.mask_a)\n\n def test_structured(self):\n ma = Masked(self.sa, mask=self.mask_sa)\n assert isinstance(ma, np.ndarray)\n assert isinstance(ma, type(self.sa))\n assert isinstance(ma, Masked)\n assert_array_equal(ma.unmasked, self.sa)\n assert_array_equal(ma.mask, self.mask_sa)\n assert ma.mask is not self.mask_sa\n assert np.may_share_memory(ma.mask, self.mask_sa)\n\n\ndef 
test_masked_ndarray_init():\n # Note: as a straight ndarray subclass, MaskedNDArray passes on\n # the arguments relevant for np.ndarray, not np.array.\n a_in = np.arange(3, dtype=int)\n m_in = np.array([True, False, False])\n buff = a_in.tobytes()\n # Check we're doing things correctly using regular ndarray.\n a = np.ndarray(shape=(3,), dtype=int, buffer=buff)\n assert_array_equal(a, a_in)\n # Check with and without mask.\n ma = MaskedNDArray((3,), dtype=int, mask=m_in, buffer=buff)\n assert_array_equal(ma.unmasked, a_in)\n assert_array_equal(ma.mask, m_in)\n ma = MaskedNDArray((3,), dtype=int, buffer=buff)\n assert_array_equal(ma.unmasked, a_in)\n assert_array_equal(ma.mask, np.zeros(3, bool))\n\n\ndef test_cannot_initialize_with_masked():\n with pytest.raises(ValueError, match='cannot handle np.ma.masked'):\n Masked(np.ma.masked)\n\n\ndef test_cannot_just_use_anything_with_a_mask_attribute():\n class my_array(np.ndarray):\n mask = True\n\n a = np.array([1., 2.]).view(my_array)\n with pytest.raises(AttributeError, match='unmasked'):\n Masked(a)\n\n\nclass TestMaskedClassCreation:\n \"\"\"Try creating a MaskedList and subclasses.\n\n By no means meant to be realistic, just to check that the basic\n machinery allows it.\n \"\"\"\n @classmethod\n def setup_class(self):\n self._base_classes_orig = Masked._base_classes.copy()\n self._masked_classes_orig = Masked._masked_classes.copy()\n\n class MaskedList(Masked, list, base_cls=list, data_cls=list):\n def __new__(cls, *args, mask=None, copy=False, **kwargs):\n self = super().__new__(cls)\n self._unmasked = self._data_cls(*args, **kwargs)\n self.mask = mask\n return self\n\n # Need to have shape for basics to work.\n @property\n def shape(self):\n return (len(self._unmasked),)\n\n self.MaskedList = MaskedList\n\n def teardown_class(self):\n Masked._base_classes = self._base_classes_orig\n Masked._masked_classes = self._masked_classes_orig\n\n def test_setup(self):\n assert issubclass(self.MaskedList, Masked)\n assert 
issubclass(self.MaskedList, list)\n assert Masked(list) is self.MaskedList\n\n def test_masked_list(self):\n ml = self.MaskedList(range(3), mask=[True, False, False])\n assert ml.unmasked == [0, 1, 2]\n assert_array_equal(ml.mask, np.array([True, False, False]))\n ml01 = ml[:2]\n assert ml01.unmasked == [0, 1]\n assert_array_equal(ml01.mask, np.array([True, False]))\n\n def test_from_list(self):\n ml = Masked([1, 2, 3], mask=[True, False, False])\n assert ml.unmasked == [1, 2, 3]\n assert_array_equal(ml.mask, np.array([True, False, False]))\n\n def test_masked_list_subclass(self):\n class MyList(list):\n pass\n\n ml = MyList(range(3))\n mml = Masked(ml, mask=[False, True, False])\n assert isinstance(mml, Masked)\n assert isinstance(mml, MyList)\n assert isinstance(mml.unmasked, MyList)\n assert mml.unmasked == [0, 1, 2]\n assert_array_equal(mml.mask, np.array([False, True, False]))\n\n assert Masked(MyList) is type(mml)\n\n\nclass TestMaskedNDArraySubclassCreation:\n \"\"\"Test that masked subclasses can be created directly and indirectly.\"\"\"\n @classmethod\n def setup_class(self):\n class MyArray(np.ndarray):\n def __new__(cls, *args, **kwargs):\n return np.asanyarray(*args, **kwargs).view(cls)\n\n self.MyArray = MyArray\n self.a = np.array([1., 2.]).view(self.MyArray)\n self.m = np.array([True, False], dtype=bool)\n\n def teardown_method(self, method):\n Masked._masked_classes.pop(self.MyArray, None)\n\n def test_direct_creation(self):\n assert self.MyArray not in Masked._masked_classes\n mcls = Masked(self.MyArray)\n assert issubclass(mcls, Masked)\n assert issubclass(mcls, self.MyArray)\n assert mcls.__name__ == 'MaskedMyArray'\n assert mcls.__doc__.startswith('Masked version of MyArray')\n mms = mcls(self.a, mask=self.m)\n assert isinstance(mms, mcls)\n assert_array_equal(mms.unmasked, self.a)\n assert_array_equal(mms.mask, self.m)\n\n def test_initialization_without_mask(self):\n # Default for not giving a mask should be False.\n mcls = 
Masked(self.MyArray)\n mms = mcls(self.a)\n assert isinstance(mms, mcls)\n assert_array_equal(mms.unmasked, self.a)\n assert_array_equal(mms.mask, np.zeros(mms.shape, bool))\n\n @pytest.mark.parametrize('masked_array', [Masked, np.ma.MaskedArray])\n def test_initialization_with_masked_values(self, masked_array):\n mcls = Masked(self.MyArray)\n ma = masked_array(np.asarray(self.a), mask=self.m)\n mms = mcls(ma)\n assert isinstance(mms, Masked)\n assert isinstance(mms, self.MyArray)\n assert_array_equal(mms.unmasked, self.a)\n assert_array_equal(mms.mask, self.m)\n\n def test_indirect_creation(self):\n assert self.MyArray not in Masked._masked_classes\n mms = Masked(self.a, mask=self.m)\n assert isinstance(mms, Masked)\n assert isinstance(mms, self.MyArray)\n assert_array_equal(mms.unmasked, self.a)\n assert_array_equal(mms.mask, self.m)\n assert self.MyArray in Masked._masked_classes\n assert Masked(self.MyArray) is type(mms)\n\n def test_can_initialize_with_masked_values(self):\n mcls = Masked(self.MyArray)\n mms = mcls(Masked(np.asarray(self.a), mask=self.m))\n assert isinstance(mms, Masked)\n assert isinstance(mms, self.MyArray)\n assert_array_equal(mms.unmasked, self.a)\n assert_array_equal(mms.mask, self.m)\n\n def test_viewing(self):\n mms = Masked(self.a, mask=self.m)\n mms2 = mms.view()\n assert type(mms2) is mms.__class__\n assert_masked_equal(mms2, mms)\n\n ma = mms.view(np.ndarray)\n assert type(ma) is MaskedNDArray\n assert_array_equal(ma.unmasked, self.a.view(np.ndarray))\n assert_array_equal(ma.mask, self.m)\n\n\nclass TestMaskedQuantityInitialization(TestMaskedArrayInitialization, QuantitySetup):\n def test_masked_quantity_class_init(self):\n # TODO: class definitions should be more easily accessible.\n mcls = Masked._masked_classes[self.a.__class__]\n # This is not a very careful test.\n mq = mcls([1., 2.], mask=[True, False], unit=u.s)\n assert mq.unit == u.s\n assert np.all(mq.value.unmasked == [1., 2.])\n assert np.all(mq.value.mask == [True, 
False])\n assert np.all(mq.mask == [True, False])\n\n def test_masked_quantity_getting(self):\n mcls = Masked._masked_classes[self.a.__class__]\n MQ = Masked(Quantity)\n assert MQ is mcls\n\n def test_initialization_without_mask(self):\n # Default for not giving a mask should be False.\n MQ = Masked(Quantity)\n mq = MQ([1., 2.], u.s)\n assert mq.unit == u.s\n assert np.all(mq.value.unmasked == [1., 2.])\n assert np.all(mq.mask == [False, False])\n\n @pytest.mark.parametrize('masked_array', [Masked, np.ma.MaskedArray])\n def test_initialization_with_masked_values(self, masked_array):\n MQ = Masked(Quantity)\n a = np.array([1., 2.])\n m = np.array([True, False])\n ma = masked_array(a, m)\n mq = MQ(ma)\n assert isinstance(mq, Masked)\n assert isinstance(mq, Quantity)\n assert_array_equal(mq.value.unmasked, a)\n assert_array_equal(mq.mask, m)\n\n\nclass TestMaskSetting(ArraySetup):\n def test_whole_mask_setting_simple(self):\n ma = Masked(self.a)\n assert ma.mask.shape == ma.shape\n assert not ma.mask.any()\n ma.mask = True\n assert ma.mask.shape == ma.shape\n assert ma.mask.all()\n ma.mask = [[True], [False]]\n assert ma.mask.shape == ma.shape\n assert_array_equal(ma.mask, np.array([[True] * 3, [False] * 3]))\n ma.mask = self.mask_a\n assert ma.mask.shape == ma.shape\n assert_array_equal(ma.mask, self.mask_a)\n assert ma.mask is not self.mask_a\n assert np.may_share_memory(ma.mask, self.mask_a)\n\n def test_whole_mask_setting_structured(self):\n ma = Masked(self.sa)\n assert ma.mask.shape == ma.shape\n assert not ma.mask['a'].any() and not ma.mask['b'].any()\n ma.mask = True\n assert ma.mask.shape == ma.shape\n assert ma.mask['a'].all() and ma.mask['b'].all()\n ma.mask = [[True], [False]]\n assert ma.mask.shape == ma.shape\n assert_array_equal(ma.mask, np.array(\n [[(True, True)] * 2, [(False, False)] * 2], dtype=self.mask_sdt))\n ma.mask = self.mask_sa\n assert ma.mask.shape == ma.shape\n assert_array_equal(ma.mask, self.mask_sa)\n assert ma.mask is not 
self.mask_sa\n assert np.may_share_memory(ma.mask, self.mask_sa)\n\n @pytest.mark.parametrize('item', VARIOUS_ITEMS)\n def test_part_mask_setting(self, item):\n ma = Masked(self.a)\n ma.mask[item] = True\n expected = np.zeros(ma.shape, bool)\n expected[item] = True\n assert_array_equal(ma.mask, expected)\n ma.mask[item] = False\n assert_array_equal(ma.mask, np.zeros(ma.shape, bool))\n # Mask propagation\n mask = np.zeros(self.a.shape, bool)\n ma = Masked(self.a, mask)\n ma.mask[item] = True\n assert np.may_share_memory(ma.mask, mask)\n assert_array_equal(ma.mask, mask)\n\n @pytest.mark.parametrize('item', ['a'] + VARIOUS_ITEMS)\n def test_part_mask_setting_structured(self, item):\n ma = Masked(self.sa)\n ma.mask[item] = True\n expected = np.zeros(ma.shape, self.mask_sdt)\n expected[item] = True\n assert_array_equal(ma.mask, expected)\n ma.mask[item] = False\n assert_array_equal(ma.mask, np.zeros(ma.shape, self.mask_sdt))\n # Mask propagation\n mask = np.zeros(self.sa.shape, self.mask_sdt)\n ma = Masked(self.sa, mask)\n ma.mask[item] = True\n assert np.may_share_memory(ma.mask, mask)\n assert_array_equal(ma.mask, mask)\n\n\n# Following are tests where we trust the initializer works.\n\n\nclass MaskedArraySetup(ArraySetup):\n @classmethod\n def setup_class(self):\n super().setup_class()\n self.ma = Masked(self.a, mask=self.mask_a)\n self.mb = Masked(self.b, mask=self.mask_b)\n self.mc = Masked(self.c, mask=self.mask_c)\n self.msa = Masked(self.sa, mask=self.mask_sa)\n self.msb = Masked(self.sb, mask=self.mask_sb)\n self.msc = Masked(self.sc, mask=self.mask_sc)\n\n\nclass TestViewing(MaskedArraySetup):\n def test_viewing_as_new_type(self):\n ma2 = self.ma.view(type(self.ma))\n assert_masked_equal(ma2, self.ma)\n\n ma3 = self.ma.view()\n assert_masked_equal(ma3, self.ma)\n\n def test_viewing_as_new_dtype(self):\n # Not very meaningful, but possible...\n ma2 = self.ma.view('c8')\n assert_array_equal(ma2.unmasked, self.a.view('c8'))\n assert_array_equal(ma2.mask, 
self.mask_a)\n\n @pytest.mark.parametrize('new_dtype', ['2f4', 'f8,f8,f8'])\n def test_viewing_as_new_dtype_not_implemented(self, new_dtype):\n # But cannot (yet) view in way that would need to create a new mask,\n # even though that view is possible for a regular array.\n check = self.a.view(new_dtype)\n with pytest.raises(NotImplementedError, match='different.*size'):\n self.ma.view(check.dtype)\n\n def test_viewing_as_something_impossible(self):\n with pytest.raises(TypeError):\n # Use intp to ensure have the same size as object,\n # otherwise we get a different error message\n Masked(np.array([1, 2], dtype=np.intp)).view(Masked)\n\n\nclass TestMaskedArrayCopyFilled(MaskedArraySetup):\n def test_copy(self):\n ma_copy = self.ma.copy()\n assert type(ma_copy) is type(self.ma)\n assert_array_equal(ma_copy.unmasked, self.ma.unmasked)\n assert_array_equal(ma_copy.mask, self.ma.mask)\n assert not np.may_share_memory(ma_copy.unmasked, self.ma.unmasked)\n assert not np.may_share_memory(ma_copy.mask, self.ma.mask)\n\n @pytest.mark.parametrize('fill_value', (0, 1))\n def test_filled(self, fill_value):\n fill_value = fill_value * getattr(self.a, 'unit', 1)\n expected = self.a.copy()\n expected[self.ma.mask] = fill_value\n result = self.ma.filled(fill_value)\n assert_array_equal(expected, result)\n\n def test_filled_no_fill_value(self):\n with pytest.raises(TypeError, match='missing 1 required'):\n self.ma.filled()\n\n @pytest.mark.parametrize('fill_value', [(0, 1), (-1, -1)])\n def test_filled_structured(self, fill_value):\n fill_value = np.array(fill_value, dtype=self.sdt)\n if hasattr(self.sa, 'unit'):\n fill_value = fill_value << self.sa.unit\n expected = self.sa.copy()\n expected['a'][self.msa.mask['a']] = fill_value['a']\n expected['b'][self.msa.mask['b']] = fill_value['b']\n result = self.msa.filled(fill_value)\n assert_array_equal(expected, result)\n\n def test_flat(self):\n ma_copy = self.ma.copy()\n ma_flat = ma_copy.flat\n # Check that single item keeps class and 
mask\n ma_flat1 = ma_flat[1]\n assert ma_flat1.unmasked == self.a.flat[1]\n assert ma_flat1.mask == self.mask_a.flat[1]\n # As well as getting items via iteration.\n assert all((ma.unmasked == a and ma.mask == m) for (ma, a, m)\n in zip(self.ma.flat, self.a.flat, self.mask_a.flat))\n\n # check that flat works like a view of the real array\n ma_flat[1] = self.b[1]\n assert ma_flat[1] == self.b[1]\n assert ma_copy[0, 1] == self.b[1]\n\n\nclass TestMaskedQuantityCopyFilled(TestMaskedArrayCopyFilled, QuantitySetup):\n pass\n\n\nclass TestMaskedLongitudeCopyFilled(TestMaskedArrayCopyFilled, LongitudeSetup):\n pass\n\n\nclass TestMaskedArrayShaping(MaskedArraySetup):\n def test_reshape(self):\n ma_reshape = self.ma.reshape((6,))\n expected_data = self.a.reshape((6,))\n expected_mask = self.mask_a.reshape((6,))\n assert ma_reshape.shape == expected_data.shape\n assert_array_equal(ma_reshape.unmasked, expected_data)\n assert_array_equal(ma_reshape.mask, expected_mask)\n\n def test_shape_setting(self):\n ma_reshape = self.ma.copy()\n ma_reshape.shape = 6,\n expected_data = self.a.reshape((6,))\n expected_mask = self.mask_a.reshape((6,))\n assert ma_reshape.shape == expected_data.shape\n assert_array_equal(ma_reshape.unmasked, expected_data)\n assert_array_equal(ma_reshape.mask, expected_mask)\n\n def test_shape_setting_failure(self):\n ma = self.ma.copy()\n with pytest.raises(ValueError, match='cannot reshape'):\n ma.shape = 5,\n\n assert ma.shape == self.ma.shape\n assert ma.mask.shape == self.ma.shape\n\n # Here, mask can be reshaped but array cannot.\n ma2 = Masked(np.broadcast_to([[1.], [2.]], self.a.shape),\n mask=self.mask_a)\n with pytest.raises(AttributeError, match='ncompatible shape'):\n ma2.shape = 6,\n\n assert ma2.shape == self.ma.shape\n assert ma2.mask.shape == self.ma.shape\n\n # Here, array can be reshaped but mask cannot.\n ma3 = Masked(self.a.copy(), mask=np.broadcast_to([[True], [False]],\n self.mask_a.shape))\n with pytest.raises(AttributeError, 
match='ncompatible shape'):\n ma3.shape = 6,\n\n assert ma3.shape == self.ma.shape\n assert ma3.mask.shape == self.ma.shape\n\n def test_ravel(self):\n ma_ravel = self.ma.ravel()\n expected_data = self.a.ravel()\n expected_mask = self.mask_a.ravel()\n assert ma_ravel.shape == expected_data.shape\n assert_array_equal(ma_ravel.unmasked, expected_data)\n assert_array_equal(ma_ravel.mask, expected_mask)\n\n def test_transpose(self):\n ma_transpose = self.ma.transpose()\n expected_data = self.a.transpose()\n expected_mask = self.mask_a.transpose()\n assert ma_transpose.shape == expected_data.shape\n assert_array_equal(ma_transpose.unmasked, expected_data)\n assert_array_equal(ma_transpose.mask, expected_mask)\n\n def test_iter(self):\n for ma, d, m in zip(self.ma, self.a, self.mask_a):\n assert_array_equal(ma.unmasked, d)\n assert_array_equal(ma.mask, m)\n\n\nclass MaskedItemTests(MaskedArraySetup):\n @pytest.mark.parametrize('item', VARIOUS_ITEMS)\n def test_getitem(self, item):\n ma_part = self.ma[item]\n expected_data = self.a[item]\n expected_mask = self.mask_a[item]\n assert_array_equal(ma_part.unmasked, expected_data)\n assert_array_equal(ma_part.mask, expected_mask)\n\n @pytest.mark.parametrize('item', ['a'] + VARIOUS_ITEMS)\n def test_getitem_structured(self, item):\n ma_part = self.msa[item]\n expected_data = self.sa[item]\n expected_mask = self.mask_sa[item]\n assert_array_equal(ma_part.unmasked, expected_data)\n assert_array_equal(ma_part.mask, expected_mask)\n\n @pytest.mark.parametrize('indices,axis', [\n ([0, 1], 1), ([0, 1], 0), ([0, 1], None), ([[0, 1], [2, 3]], None)])\n def test_take(self, indices, axis):\n ma_take = self.ma.take(indices, axis=axis)\n expected_data = self.a.take(indices, axis=axis)\n expected_mask = self.mask_a.take(indices, axis=axis)\n assert_array_equal(ma_take.unmasked, expected_data)\n assert_array_equal(ma_take.mask, expected_mask)\n ma_take2 = np.take(self.ma, indices, axis=axis)\n assert_masked_equal(ma_take2, ma_take)\n\n 
@pytest.mark.parametrize('item', VARIOUS_ITEMS)\n @pytest.mark.parametrize('mask', [None, True, False])\n def test_setitem(self, item, mask):\n base = self.ma.copy()\n expected_data = self.a.copy()\n expected_mask = self.mask_a.copy()\n value = self.a[0, 0] if mask is None else Masked(self.a[0, 0], mask)\n base[item] = value\n expected_data[item] = value if mask is None else value.unmasked\n expected_mask[item] = False if mask is None else value.mask\n assert_array_equal(base.unmasked, expected_data)\n assert_array_equal(base.mask, expected_mask)\n\n @pytest.mark.parametrize('item', ['a'] + VARIOUS_ITEMS)\n @pytest.mark.parametrize('mask', [None, True, False])\n def test_setitem_structured(self, item, mask):\n base = self.msa.copy()\n expected_data = self.sa.copy()\n expected_mask = self.mask_sa.copy()\n value = self.sa['b'] if item == 'a' else self.sa[0, 0]\n if mask is not None:\n value = Masked(value, mask)\n base[item] = value\n expected_data[item] = value if mask is None else value.unmasked\n expected_mask[item] = False if mask is None else value.mask\n assert_array_equal(base.unmasked, expected_data)\n assert_array_equal(base.mask, expected_mask)\n\n @pytest.mark.parametrize('item', VARIOUS_ITEMS)\n def test_setitem_np_ma_masked(self, item):\n base = self.ma.copy()\n expected_mask = self.mask_a.copy()\n base[item] = np.ma.masked\n expected_mask[item] = True\n assert_array_equal(base.unmasked, self.a)\n assert_array_equal(base.mask, expected_mask)\n\n\nclass TestMaskedArrayItems(MaskedItemTests):\n @classmethod\n def setup_class(self):\n super().setup_class()\n self.d = np.array(['aa', 'bb'])\n self.mask_d = np.array([True, False])\n self.md = Masked(self.d, self.mask_d)\n\n # Quantity, Longitude cannot hold strings.\n def test_getitem_strings(self):\n md = self.md.copy()\n md0 = md[0]\n assert md0.unmasked == self.d[0]\n assert md0.mask\n md_all = md[:]\n assert_masked_equal(md_all, md)\n\n def test_setitem_strings_np_ma_masked(self):\n md = self.md.copy()\n 
md[1] = np.ma.masked\n assert_array_equal(md.unmasked, self.d)\n assert_array_equal(md.mask, np.ones(2, bool))\n\n\nclass TestMaskedQuantityItems(MaskedItemTests, QuantitySetup):\n pass\n\n\nclass TestMaskedLongitudeItems(MaskedItemTests, LongitudeSetup):\n pass\n\n\nclass MaskedOperatorTests(MaskedArraySetup):\n @pytest.mark.parametrize('op', (operator.add, operator.sub))\n def test_add_subtract(self, op):\n mapmb = op(self.ma, self.mb)\n expected_data = op(self.a, self.b)\n expected_mask = (self.ma.mask | self.mb.mask)\n # Note: assert_array_equal also checks type, i.e., that, e.g.,\n # Longitude decays into an Angle.\n assert_array_equal(mapmb.unmasked, expected_data)\n assert_array_equal(mapmb.mask, expected_mask)\n\n @pytest.mark.parametrize('op', (operator.eq, operator.ne))\n def test_equality(self, op):\n mapmb = op(self.ma, self.mb)\n expected_data = op(self.a, self.b)\n expected_mask = (self.ma.mask | self.mb.mask)\n # Note: assert_array_equal also checks type, i.e., that boolean\n # output is represented as plain Masked ndarray.\n assert_array_equal(mapmb.unmasked, expected_data)\n assert_array_equal(mapmb.mask, expected_mask)\n\n def test_not_implemented(self):\n with pytest.raises(TypeError):\n self.ma > 'abc'\n\n @pytest.mark.parametrize('different_names', [False, True])\n @pytest.mark.parametrize('op', (operator.eq, operator.ne))\n def test_structured_equality(self, op, different_names):\n msb = self.msb\n if different_names:\n msb = msb.astype([(f'different_{name}', dt)\n for name, dt in msb.dtype.fields.items()])\n mapmb = op(self.msa, self.msb)\n # Expected is a bit tricky here: only unmasked fields count\n expected_data = np.ones(mapmb.shape, bool)\n expected_mask = np.ones(mapmb.shape, bool)\n for field in self.sdt.names:\n fa, mfa = self.sa[field], self.mask_sa[field]\n fb, mfb = self.sb[field], self.mask_sb[field]\n mfequal = mfa | mfb\n fequal = (fa == fb) | mfequal\n expected_data &= fequal\n expected_mask &= mfequal\n\n if op is 
operator.ne:\n expected_data = ~expected_data\n\n # Note: assert_array_equal also checks type, i.e., that boolean\n # output is represented as plain Masked ndarray.\n assert_array_equal(mapmb.unmasked, expected_data)\n assert_array_equal(mapmb.mask, expected_mask)\n\n def test_matmul(self):\n result = self.ma.T @ self.ma\n assert_array_equal(result.unmasked, self.a.T @ self.a)\n mask1 = np.any(self.mask_a, axis=0)\n expected_mask = np.logical_or.outer(mask1, mask1)\n assert_array_equal(result.mask, expected_mask)\n result2 = self.ma.T @ self.a\n assert_array_equal(result2.unmasked, self.a.T @ self.a)\n expected_mask2 = np.logical_or.outer(mask1, np.zeros(3, bool))\n assert_array_equal(result2.mask, expected_mask2)\n result3 = self.a.T @ self.ma\n assert_array_equal(result3.unmasked, self.a.T @ self.a)\n expected_mask3 = np.logical_or.outer(np.zeros(3, bool), mask1)\n assert_array_equal(result3.mask, expected_mask3)\n\n def test_matvec(self):\n result = self.ma @ self.mb\n assert np.all(result.mask)\n assert_array_equal(result.unmasked, self.a @ self.b)\n # Just using the masked vector still has all elements masked.\n result2 = self.a @ self.mb\n assert np.all(result2.mask)\n assert_array_equal(result2.unmasked, self.a @ self.b)\n new_ma = self.ma.copy()\n new_ma.mask[0, 0] = False\n result3 = new_ma @ self.b\n assert_array_equal(result3.unmasked, self.a @ self.b)\n assert_array_equal(result3.mask, new_ma.mask.any(-1))\n\n def test_vecmat(self):\n result = self.mb @ self.ma.T\n assert np.all(result.mask)\n assert_array_equal(result.unmasked, self.b @ self.a.T)\n result2 = self.b @ self.ma.T\n assert np.all(result2.mask)\n assert_array_equal(result2.unmasked, self.b @ self.a.T)\n new_ma = self.ma.T.copy()\n new_ma.mask[0, 0] = False\n result3 = self.b @ new_ma\n assert_array_equal(result3.unmasked, self.b @ self.a.T)\n assert_array_equal(result3.mask, new_ma.mask.any(0))\n\n def test_vecvec(self):\n result = self.mb @ self.mb\n assert result.shape == ()\n assert 
result.mask\n assert result.unmasked == self.b @ self.b\n mb_no_mask = Masked(self.b, False)\n result2 = mb_no_mask @ mb_no_mask\n assert not result2.mask\n\n\nclass TestMaskedArrayOperators(MaskedOperatorTests):\n # Some further tests that use strings, which are not useful for Quantity.\n @pytest.mark.parametrize('op', (operator.eq, operator.ne))\n def test_equality_strings(self, op):\n m1 = Masked(np.array(['a', 'b', 'c']), mask=[True, False, False])\n m2 = Masked(np.array(['a', 'b', 'd']), mask=[False, False, False])\n result = op(m1, m2)\n assert_array_equal(result.unmasked, op(m1.unmasked, m2.unmasked))\n assert_array_equal(result.mask, m1.mask | m2.mask)\n\n result2 = op(m1, m2.unmasked)\n assert_masked_equal(result2, result)\n\n def test_not_implemented(self):\n with pytest.raises(TypeError):\n Masked(['a', 'b']) > object()\n\n\nclass TestMaskedQuantityOperators(MaskedOperatorTests, QuantitySetup):\n pass\n\n\nclass TestMaskedLongitudeOperators(MaskedOperatorTests, LongitudeSetup):\n pass\n\n\nclass TestMaskedArrayMethods(MaskedArraySetup):\n def test_round(self):\n # Goes via ufunc, hence easy.\n mrc = self.mc.round()\n expected = Masked(self.c.round(), self.mask_c)\n assert_masked_equal(mrc, expected)\n\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_sum(self, axis):\n ma_sum = self.ma.sum(axis)\n expected_data = self.a.sum(axis)\n expected_mask = self.ma.mask.any(axis)\n assert_array_equal(ma_sum.unmasked, expected_data)\n assert_array_equal(ma_sum.mask, expected_mask)\n\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_sum_where(self, axis):\n where = np.array([\n [True, False, False, ],\n [True, True, True, ],\n ])\n where_final = ~self.ma.mask & where\n ma_sum = self.ma.sum(axis, where=where_final)\n expected_data = self.ma.unmasked.sum(axis, where=where_final)\n expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)\n assert_array_equal(ma_sum.unmasked, expected_data)\n 
assert_array_equal(ma_sum.mask, expected_mask)\n\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_cumsum(self, axis):\n ma_sum = self.ma.cumsum(axis)\n expected_data = self.a.cumsum(axis)\n mask = self.mask_a\n if axis is None:\n mask = mask.ravel()\n expected_mask = np.logical_or.accumulate(mask, axis=axis)\n assert_array_equal(ma_sum.unmasked, expected_data)\n assert_array_equal(ma_sum.mask, expected_mask)\n\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_mean(self, axis):\n ma_mean = self.ma.mean(axis)\n filled = self.a.copy()\n filled[self.mask_a] = 0.\n count = 1 - self.ma.mask.astype(int)\n expected_data = filled.sum(axis) / count.sum(axis)\n expected_mask = self.ma.mask.all(axis)\n assert_array_equal(ma_mean.unmasked, expected_data)\n assert_array_equal(ma_mean.mask, expected_mask)\n\n def test_mean_int16(self):\n ma = self.ma.astype('i2')\n ma_mean = ma.mean()\n assert ma_mean.dtype == 'f8'\n expected = ma.astype('f8').mean()\n assert_masked_equal(ma_mean, expected)\n\n def test_mean_float16(self):\n ma = self.ma.astype('f2')\n ma_mean = ma.mean()\n assert ma_mean.dtype == 'f2'\n expected = self.ma.mean().astype('f2')\n assert_masked_equal(ma_mean, expected)\n\n def test_mean_inplace(self):\n expected = self.ma.mean(1)\n out = Masked(np.zeros_like(expected.unmasked))\n result = self.ma.mean(1, out=out)\n assert result is out\n assert_masked_equal(out, expected)\n\n @pytest.mark.xfail(NUMPY_LT_1_20, reason=\"'where' keyword argument not supported for numpy < 1.20\")\n @pytest.mark.filterwarnings(\"ignore:.*encountered in.*divide\")\n @pytest.mark.filterwarnings(\"ignore:Mean of empty slice\")\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_mean_where(self, axis):\n where = np.array([\n [True, False, False, ],\n [True, True, True, ],\n ])\n where_final = ~self.ma.mask & where\n ma_mean = self.ma.mean(axis, where=where)\n expected_data = self.ma.unmasked.mean(axis, where=where_final)\n expected_mask = 
np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)\n assert_array_equal(ma_mean.unmasked, expected_data)\n assert_array_equal(ma_mean.mask, expected_mask)\n\n @pytest.mark.filterwarnings(\"ignore:.*encountered in.*divide\")\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_var(self, axis):\n ma_var = self.ma.var(axis)\n filled = (self.a - self.ma.mean(axis, keepdims=True))**2\n filled[self.mask_a] = 0.\n count = (1 - self.ma.mask.astype(int)).sum(axis)\n expected_data = filled.sum(axis) / count\n expected_mask = self.ma.mask.all(axis)\n assert_array_equal(ma_var.unmasked, expected_data)\n assert_array_equal(ma_var.mask, expected_mask)\n ma_var1 = self.ma.var(axis, ddof=1)\n expected_data1 = filled.sum(axis) / (count - 1)\n expected_mask1 = self.ma.mask.all(axis) | (count <= 1)\n assert_array_equal(ma_var1.unmasked, expected_data1)\n assert_array_equal(ma_var1.mask, expected_mask1)\n ma_var5 = self.ma.var(axis, ddof=5)\n assert np.all(~np.isfinite(ma_var5.unmasked))\n assert ma_var5.mask.all()\n\n def test_var_int16(self):\n ma = self.ma.astype('i2')\n ma_var = ma.var()\n assert ma_var.dtype == 'f8'\n expected = ma.astype('f8').var()\n assert_masked_equal(ma_var, expected)\n\n @pytest.mark.xfail(NUMPY_LT_1_20, reason=\"'where' keyword argument not supported for numpy < 1.20\")\n @pytest.mark.filterwarnings(\"ignore:.*encountered in.*divide\")\n @pytest.mark.filterwarnings(\"ignore:Degrees of freedom <= 0 for slice\")\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_var_where(self, axis):\n where = np.array([\n [True, False, False, ],\n [True, True, True, ],\n ])\n where_final = ~self.ma.mask & where\n ma_var = self.ma.var(axis, where=where)\n expected_data = self.ma.unmasked.var(axis, where=where_final)\n expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)\n assert_array_equal(ma_var.unmasked, expected_data)\n assert_array_equal(ma_var.mask, 
expected_mask)\n\n def test_std(self):\n ma_std = self.ma.std(1, ddof=1)\n ma_var1 = self.ma.var(1, ddof=1)\n expected = np.sqrt(ma_var1)\n assert_masked_equal(ma_std, expected)\n\n def test_std_inplace(self):\n expected = self.ma.std(1, ddof=1)\n out = Masked(np.zeros_like(expected.unmasked))\n result = self.ma.std(1, ddof=1, out=out)\n assert result is out\n assert_masked_equal(result, expected)\n\n @pytest.mark.xfail(NUMPY_LT_1_20, reason=\"'where' keyword argument not supported for numpy < 1.20\")\n @pytest.mark.filterwarnings(\"ignore:.*encountered in.*divide\")\n @pytest.mark.filterwarnings(\"ignore:Degrees of freedom <= 0 for slice\")\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_std_where(self, axis):\n where = np.array([\n [True, False, False, ],\n [True, True, True, ],\n ])\n where_final = ~self.ma.mask & where\n ma_std = self.ma.std(axis, where=where)\n expected_data = self.ma.unmasked.std(axis, where=where_final)\n expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)\n assert_array_equal(ma_std.unmasked, expected_data)\n assert_array_equal(ma_std.mask, expected_mask)\n\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_min(self, axis):\n ma_min = self.ma.min(axis)\n filled = self.a.copy()\n filled[self.mask_a] = self.a.max()\n expected_data = filled.min(axis)\n assert_array_equal(ma_min.unmasked, expected_data)\n assert not np.any(ma_min.mask)\n\n def test_min_with_masked_nan(self):\n ma = Masked([3., np.nan, 2.], mask=[False, True, False])\n ma_min = ma.min()\n assert_array_equal(ma_min.unmasked, np.array(2.))\n assert not ma_min.mask\n\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_min_where(self, axis):\n where = np.array([\n [True, False, False, ],\n [True, True, True, ],\n ])\n where_final = ~self.ma.mask & where\n ma_min = self.ma.min(axis, where=where_final, initial=np.inf)\n expected_data = self.ma.unmasked.min(axis, where=where_final, initial=np.inf)\n 
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)\n assert_array_equal(ma_min.unmasked, expected_data)\n assert_array_equal(ma_min.mask, expected_mask)\n\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_max(self, axis):\n ma_max = self.ma.max(axis)\n filled = self.a.copy()\n filled[self.mask_a] = self.a.min()\n expected_data = filled.max(axis)\n assert_array_equal(ma_max.unmasked, expected_data)\n assert not np.any(ma_max.mask)\n\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_max_where(self, axis):\n where = np.array([\n [True, False, False, ],\n [True, True, True, ],\n ])\n where_final = ~self.ma.mask & where\n ma_max = self.ma.max(axis, where=where_final, initial=-np.inf)\n expected_data = self.ma.unmasked.max(axis, where=where_final, initial=-np.inf)\n expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)\n assert_array_equal(ma_max.unmasked, expected_data)\n assert_array_equal(ma_max.mask, expected_mask)\n\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_argmin(self, axis):\n ma_argmin = self.ma.argmin(axis)\n filled = self.a.copy()\n filled[self.mask_a] = self.a.max()\n expected_data = filled.argmin(axis)\n assert_array_equal(ma_argmin, expected_data)\n\n def test_argmin_only_one_unmasked_element(self):\n # Regression test for example from @taldcroft at\n # https://github.com/astropy/astropy/pull/11127#discussion_r600864559\n ma = Masked(data=[1, 2], mask=[True, False])\n assert ma.argmin() == 1\n\n if not NUMPY_LT_1_22:\n def test_argmin_keepdims(self):\n ma = Masked(data=[[1, 2], [3, 4]], mask=[[True, False], [False, False]])\n assert_array_equal(ma.argmin(axis=0, keepdims=True),\n np.array([[1, 0]]))\n\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_argmax(self, axis):\n ma_argmax = self.ma.argmax(axis)\n filled = self.a.copy()\n filled[self.mask_a] = self.a.min()\n expected_data = 
filled.argmax(axis)\n assert_array_equal(ma_argmax, expected_data)\n\n if not NUMPY_LT_1_22:\n def test_argmax_keepdims(self):\n ma = Masked(data=[[1, 2], [3, 4]], mask=[[True, False], [False, False]])\n assert_array_equal(ma.argmax(axis=1, keepdims=True),\n np.array([[1], [1]]))\n\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_argsort(self, axis):\n ma_argsort = self.ma.argsort(axis)\n filled = self.a.copy()\n filled[self.mask_a] = self.a.max() * 1.1\n expected_data = filled.argsort(axis)\n assert_array_equal(ma_argsort, expected_data)\n\n @pytest.mark.parametrize('order', [None, 'a', ('a', 'b'), ('b', 'a')])\n @pytest.mark.parametrize('axis', [0, 1])\n def test_structured_argsort(self, axis, order):\n ma_argsort = self.msa.argsort(axis, order=order)\n filled = self.msa.filled(fill_value=np.array((np.inf, np.inf),\n dtype=self.sdt))\n expected_data = filled.argsort(axis, order=order)\n assert_array_equal(ma_argsort, expected_data)\n\n def test_argsort_error(self):\n with pytest.raises(ValueError, match='when the array has no fields'):\n self.ma.argsort(axis=0, order='a')\n\n @pytest.mark.parametrize('axis', (0, 1))\n def test_sort(self, axis):\n ma_sort = self.ma.copy()\n ma_sort.sort(axis)\n indices = self.ma.argsort(axis)\n expected_data = np.take_along_axis(self.ma.unmasked, indices, axis)\n expected_mask = np.take_along_axis(self.ma.mask, indices, axis)\n assert_array_equal(ma_sort.unmasked, expected_data)\n assert_array_equal(ma_sort.mask, expected_mask)\n\n @pytest.mark.parametrize('kth', [1, 3])\n def test_argpartition(self, kth):\n ma = self.ma.ravel()\n ma_argpartition = ma.argpartition(kth)\n partitioned = ma[ma_argpartition]\n assert (partitioned[:kth] < partitioned[kth]).all()\n assert (partitioned[kth:] >= partitioned[kth]).all()\n if partitioned[kth].mask:\n assert all(partitioned.mask[kth:])\n else:\n assert not any(partitioned.mask[:kth])\n\n @pytest.mark.parametrize('kth', [1, 3])\n def test_partition(self, kth):\n partitioned = 
self.ma.flatten()\n partitioned.partition(kth)\n assert (partitioned[:kth] < partitioned[kth]).all()\n assert (partitioned[kth:] >= partitioned[kth]).all()\n if partitioned[kth].mask:\n assert all(partitioned.mask[kth:])\n else:\n assert not any(partitioned.mask[:kth])\n\n def test_all_explicit(self):\n a1 = np.array([[1., 2.],\n [3., 4.]])\n a2 = np.array([[1., 0.],\n [3., 4.]])\n if self._data_cls is not np.ndarray:\n a1 = self._data_cls(a1, self.a.unit)\n a2 = self._data_cls(a2, self.a.unit)\n ma1 = Masked(a1, mask=[[False, False],\n [True, True]])\n ma2 = Masked(a2, mask=[[False, True],\n [False, True]])\n ma1_eq_ma2 = ma1 == ma2\n assert_array_equal(ma1_eq_ma2.unmasked, np.array([[True, False],\n [True, True]]))\n assert_array_equal(ma1_eq_ma2.mask, np.array([[False, True],\n [True, True]]))\n assert ma1_eq_ma2.all()\n assert not (ma1 != ma2).all()\n ma_eq1 = ma1_eq_ma2.all(1)\n assert_array_equal(ma_eq1.mask, np.array([False, True]))\n assert bool(ma_eq1[0]) is True\n assert bool(ma_eq1[1]) is False\n ma_eq0 = ma1_eq_ma2.all(0)\n assert_array_equal(ma_eq0.mask, np.array([False, True]))\n assert bool(ma_eq1[0]) is True\n assert bool(ma_eq1[1]) is False\n\n @pytest.mark.parametrize('method', ['any', 'all'])\n @pytest.mark.parametrize('array,axis', [\n ('a', 0), ('a', 1), ('a', None),\n ('b', None),\n ('c', 0), ('c', 1), ('c', None)])\n def test_all_and_any(self, array, axis, method):\n ma = getattr(self, 'm'+array)\n ma_eq = ma == ma\n ma_all_or_any = getattr(ma_eq, method)(axis=axis)\n filled = ma_eq.unmasked.copy()\n filled[ma_eq.mask] = method == 'all'\n a_all_or_any = getattr(filled, method)(axis=axis)\n all_masked = ma.mask.all(axis)\n assert_array_equal(ma_all_or_any.mask, all_masked)\n assert_array_equal(ma_all_or_any.unmasked, a_all_or_any)\n # interpretation as bool\n as_bool = [bool(a) for a in ma_all_or_any.ravel()]\n expected = [bool(a) for a in (a_all_or_any & ~all_masked).ravel()]\n assert as_bool == expected\n\n def test_any_inplace(self):\n 
ma_eq = self.ma == self.ma\n expected = ma_eq.any(1)\n out = Masked(np.zeros_like(expected.unmasked))\n result = ma_eq.any(1, out=out)\n assert result is out\n assert_masked_equal(result, expected)\n\n @pytest.mark.xfail(NUMPY_LT_1_20, reason=\"'where' keyword argument not supported for numpy < 1.20\")\n @pytest.mark.parametrize('method', ('all', 'any'))\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_all_and_any_where(self, method, axis):\n where = np.array([\n [True, False, False, ],\n [True, True, True, ],\n ])\n where_final = ~self.ma.mask & where\n ma_eq = self.ma == self.ma\n ma_any = getattr(ma_eq, method)(axis, where=where)\n expected_data = getattr(ma_eq.unmasked, method)(axis, where=where_final)\n expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis, where=where_final) | (~where_final).all(axis)\n assert_array_equal(ma_any.unmasked, expected_data)\n assert_array_equal(ma_any.mask, expected_mask)\n\n @pytest.mark.parametrize('offset', (0, 1))\n def test_diagonal(self, offset):\n mda = self.ma.diagonal(offset=offset)\n expected = Masked(self.a.diagonal(offset=offset),\n self.mask_a.diagonal(offset=offset))\n assert_masked_equal(mda, expected)\n\n @pytest.mark.parametrize('offset', (0, 1))\n def test_trace(self, offset):\n mta = self.ma.trace(offset=offset)\n expected = Masked(self.a.trace(offset=offset),\n self.mask_a.trace(offset=offset, dtype=bool))\n assert_masked_equal(mta, expected)\n\n def test_clip(self):\n maclip = self.ma.clip(self.b, self.c)\n expected = Masked(self.a.clip(self.b, self.c), self.mask_a)\n assert_masked_equal(maclip, expected)\n\n def test_clip_masked_min_max(self):\n maclip = self.ma.clip(self.mb, self.mc)\n # Need to be careful with min, max because of Longitude, which wraps.\n dmax = np.maximum(np.maximum(self.a, self.b), self.c).max()\n dmin = np.minimum(np.minimum(self.a, self.b), self.c).min()\n expected = Masked(self.a.clip(self.mb.filled(dmin),\n self.mc.filled(dmax)),\n mask=self.mask_a)\n 
assert_masked_equal(maclip, expected)\n\n\nclass TestMaskedQuantityMethods(TestMaskedArrayMethods, QuantitySetup):\n pass\n\n\nclass TestMaskedLongitudeMethods(TestMaskedArrayMethods, LongitudeSetup):\n pass\n\n\nclass TestMaskedArrayProductMethods(MaskedArraySetup):\n # These cannot work on Quantity, so done separately\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_prod(self, axis):\n ma_sum = self.ma.prod(axis)\n expected_data = self.a.prod(axis)\n expected_mask = self.ma.mask.any(axis)\n assert_array_equal(ma_sum.unmasked, expected_data)\n assert_array_equal(ma_sum.mask, expected_mask)\n\n @pytest.mark.parametrize('axis', (0, 1, None))\n def test_cumprod(self, axis):\n ma_sum = self.ma.cumprod(axis)\n expected_data = self.a.cumprod(axis)\n mask = self.mask_a\n if axis is None:\n mask = mask.ravel()\n expected_mask = np.logical_or.accumulate(mask, axis=axis)\n assert_array_equal(ma_sum.unmasked, expected_data)\n assert_array_equal(ma_sum.mask, expected_mask)\n\n\ndef test_masked_str_explicit():\n sa = np.array([(1., 2.), (3., 4.)], dtype='f8,f8')\n msa = Masked(sa, [(False, True), (False, False)])\n assert str(msa) == \"[(1., ——) (3., 4.)]\"\n assert str(msa[0]) == \"(1., ——)\"\n assert str(msa[1]) == \"(3., 4.)\"\n with np.printoptions(precision=3, floatmode='fixed'):\n assert str(msa) == \"[(1.000, ———) (3.000, 4.000)]\"\n\n\ndef test_masked_repr_explicit():\n # Use explicit endianness to ensure tests pass on all architectures\n sa = np.array([(1., 2.), (3., 4.)], dtype='>f8,>f8')\n msa = Masked(sa, [(False, True), (False, False)])\n assert repr(msa) == (\"MaskedNDArray([(1., ——), (3., 4.)], \"\n \"dtype=[('f0', '>f8'), ('f1', '>f8')])\")\n assert repr(msa[0]) == (\"MaskedNDArray((1., ——), \"\n \"dtype=[('f0', '>f8'), ('f1', '>f8')])\")\n assert repr(msa[1]) == (\"MaskedNDArray((3., 4.), \"\n \"dtype=[('f0', '>f8'), ('f1', '>f8')])\")\n\n\ndef test_masked_repr_summary():\n ma = Masked(np.arange(15.), mask=[True]+[False]*14)\n with 
np.printoptions(threshold=2):\n assert repr(ma) == (\n \"MaskedNDArray([———, 1., 2., ..., 12., 13., 14.])\")\n\n\ndef test_masked_repr_nodata():\n assert repr(Masked([])) == \"MaskedNDArray([], dtype=float64)\"\n\n\nclass TestMaskedArrayRepr(MaskedArraySetup):\n def test_array_str(self):\n # very blunt check they work at all.\n str(self.ma)\n str(self.mb)\n str(self.mc)\n str(self.msa)\n str(self.msb)\n str(self.msc)\n\n def test_scalar_str(self):\n assert self.mb[0].shape == ()\n str(self.mb[0])\n assert self.msb[0].shape == ()\n str(self.msb[0])\n assert self.msc[0].shape == ()\n str(self.msc[0])\n\n def test_array_repr(self):\n repr(self.ma)\n repr(self.mb)\n repr(self.mc)\n repr(self.msa)\n repr(self.msb)\n repr(self.msc)\n\n def test_scalar_repr(self):\n repr(self.mb[0])\n repr(self.msb[0])\n repr(self.msc[0])\n\n\nclass TestMaskedQuantityRepr(TestMaskedArrayRepr, QuantitySetup):\n pass\n\n\nclass TestMaskedRecarray(MaskedArraySetup):\n @classmethod\n def setup_class(self):\n super().setup_class()\n self.ra = self.sa.view(np.recarray)\n self.mra = Masked(self.ra, mask=self.mask_sa)\n\n def test_recarray_setup(self):\n assert isinstance(self.mra, Masked)\n assert isinstance(self.mra, np.recarray)\n assert np.all(self.mra.unmasked == self.ra)\n assert np.all(self.mra.mask == self.mask_sa)\n assert_array_equal(self.mra.view(np.ndarray), self.sa)\n assert isinstance(self.mra.a, Masked)\n assert_array_equal(self.mra.a.unmasked, self.sa['a'])\n assert_array_equal(self.mra.a.mask, self.mask_sa['a'])\n\n def test_recarray_setting(self):\n mra = self.mra.copy()\n mra.a = self.msa['b']\n assert_array_equal(mra.a.unmasked, self.msa['b'].unmasked)\n assert_array_equal(mra.a.mask, self.msa['b'].mask)\n\n @pytest.mark.parametrize('attr', [0, 'a'])\n def test_recarray_field_getting(self, attr):\n mra_a = self.mra.field(attr)\n assert isinstance(mra_a, Masked)\n assert_array_equal(mra_a.unmasked, self.sa['a'])\n assert_array_equal(mra_a.mask, self.mask_sa['a'])\n\n 
@pytest.mark.parametrize('attr', [0, 'a'])\n def test_recarray_field_setting(self, attr):\n mra = self.mra.copy()\n mra.field(attr, self.msa['b'])\n assert_array_equal(mra.a.unmasked, self.msa['b'].unmasked)\n assert_array_equal(mra.a.mask, self.msa['b'].mask)\n\n\nclass TestMaskedArrayInteractionWithNumpyMA(MaskedArraySetup):\n def test_masked_array_from_masked(self):\n \"\"\"Check that we can initialize a MaskedArray properly.\"\"\"\n np_ma = np.ma.MaskedArray(self.ma)\n assert type(np_ma) is np.ma.MaskedArray\n assert type(np_ma.data) is self._data_cls\n assert type(np_ma.mask) is np.ndarray\n assert_array_equal(np_ma.data, self.a)\n assert_array_equal(np_ma.mask, self.mask_a)\n\n def test_view_as_masked_array(self):\n \"\"\"Test that we can be viewed as a MaskedArray.\"\"\"\n np_ma = self.ma.view(np.ma.MaskedArray)\n assert type(np_ma) is np.ma.MaskedArray\n assert type(np_ma.data) is self._data_cls\n assert type(np_ma.mask) is np.ndarray\n assert_array_equal(np_ma.data, self.a)\n assert_array_equal(np_ma.mask, self.mask_a)\n\n\nclass TestMaskedQuantityInteractionWithNumpyMA(\n TestMaskedArrayInteractionWithNumpyMA, QuantitySetup):\n pass\n"}}},{"rowIdx":1376,"cells":{"hash":{"kind":"string","value":"7305b7c093c44b443e70b4decfdca72f68b97ab595e3c6d7bacfbd8ffdd68385"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nimport pytest\n\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord, representation as r\nfrom astropy.time import Time\n\nfrom astropy.utils.masked import Masked\n\n\nclass TestRepresentations:\n def setup_class(self):\n self.x = np.array([3., 5., 0.]) << u.m\n self.y = np.array([4., 12., 1.]) << u.m\n self.z = np.array([0., 0., 1.]) << u.m\n self.c = r.CartesianRepresentation(self.x, self.y, self.z)\n self.mask = np.array([False, False, True])\n self.mx = Masked(self.x, self.mask)\n self.my = Masked(self.y, 
self.mask)\n self.mz = Masked(self.z, self.mask)\n self.mc = r.CartesianRepresentation(self.mx, self.my, self.mz)\n\n def test_initialization(self):\n check = self.mc.z == self.mz\n assert_array_equal(check.unmasked, np.ones(3, bool))\n assert_array_equal(check.mask, self.mask)\n assert_array_equal(self.mc.x, self.mx)\n assert_array_equal(self.mc.y, self.my)\n assert_array_equal(self.mc.z, self.mz)\n\n def test_norm(self):\n # Need stacking and erfa override.\n norm = self.mc.norm()\n assert_array_equal(norm.unmasked, self.c.norm())\n assert_array_equal(norm.mask, self.mask)\n\n def test_transformation(self):\n msr = self.mc.represent_as(r.SphericalRepresentation)\n sr = self.c.represent_as(r.SphericalRepresentation)\n for comp in msr.components:\n mc = getattr(msr, comp)\n c = getattr(sr, comp)\n assert_array_equal(mc.unmasked, c)\n assert_array_equal(mc.mask, self.mask)\n\n # Transformation back. This also tests erfa.ufunc.s2p, which\n # is special in having a core dimension only in the output.\n cr2 = sr.represent_as(r.CartesianRepresentation)\n mcr2 = msr.represent_as(r.CartesianRepresentation)\n for comp in mcr2.components:\n mc = getattr(mcr2, comp)\n c = getattr(cr2, comp)\n assert_array_equal(mc.unmasked, c)\n assert_array_equal(mc.mask, self.mask)\n\n\nclass TestSkyCoord:\n def setup_class(self):\n self.ra = np.array([3., 5., 0.]) << u.hourangle\n self.dec = np.array([4., 12., 1.]) << u.deg\n self.sc = SkyCoord(self.ra, self.dec)\n self.mask = np.array([False, False, True])\n self.mra = Masked(self.ra, self.mask)\n self.mdec = Masked(self.dec, self.mask)\n self.msc = SkyCoord(self.mra, self.mdec)\n\n def test_initialization(self):\n check = self.msc.dec == self.mdec\n assert_array_equal(check.unmasked, np.ones(3, bool))\n assert_array_equal(check.mask, self.mask)\n assert_array_equal(self.msc.data.lon, self.mra)\n assert_array_equal(self.msc.data.lat, self.mdec)\n\n def test_transformation(self):\n gcrs = self.sc.gcrs\n mgcrs = self.msc.gcrs\n 
assert_array_equal(mgcrs.data.lon.mask, self.msc.data.lon.mask)\n assert_array_equal(mgcrs.data.lon.unmasked, gcrs.data.lon)\n assert_array_equal(mgcrs.data.lat.unmasked, gcrs.data.lat)\n\n\nclass TestTime:\n def setup_class(self):\n self.s = np.array(['2010-11-12T13:14:15.160',\n '2010-11-12T13:14:15.161',\n '2011-12-13T14:15:16.170'])\n self.t = Time(self.s)\n # Time formats will currently strip any ndarray subtypes, so we cannot\n # initialize a Time with a Masked version of self.s yet. Instead, we\n # work around it, for now only testing that masked are preserved by\n # transformations.\n self.mask = np.array([False, False, True])\n self.mt = self.t._apply(Masked, self.mask)\n\n def test_initialization(self):\n assert_array_equal(self.mt.jd1.mask, self.mask)\n assert_array_equal(self.mt.jd2.mask, self.mask)\n assert_array_equal(self.mt.jd1.unmasked, self.t.jd1)\n assert_array_equal(self.mt.jd2.unmasked, self.t.jd2)\n\n @pytest.mark.parametrize('format_', ['jd', 'cxcsec', 'jyear'])\n def test_different_formats(self, format_):\n # Formats do not yet work with everything; e.g., isot is not supported\n # since the Masked class does not yet support structured arrays.\n tfmt = getattr(self.t, format_)\n mtfmt = getattr(self.mt, format_)\n check = mtfmt == tfmt\n assert_array_equal(check.unmasked, np.ones(3, bool))\n assert_array_equal(check.mask, self.mask)\n\n @pytest.mark.parametrize('scale', ['tai', 'tcb', 'ut1'])\n def test_transformation(self, scale):\n tscl = getattr(self.t, scale)\n mtscl = getattr(self.mt, scale)\n assert_array_equal(mtscl.jd1.mask, self.mask)\n assert_array_equal(mtscl.jd2.mask, self.mask)\n assert_array_equal(mtscl.jd1.unmasked, tscl.jd1)\n assert_array_equal(mtscl.jd2.unmasked, tscl.jd2)\n"}}},{"rowIdx":1377,"cells":{"hash":{"kind":"string","value":"5788a5760fc9af2b983413284971c50089e12dfd648e61b0ff4938eb9a1b5960"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Test all functions 
covered by __array_function__.\n\nHere, run through all functions, with simple tests just to check the helpers.\nMore complicated tests of functionality, including with subclasses, are done\nin test_functions.\n\nTODO: finish full coverage (see also `~astropy.utils.masked.function_helpers`)\n- np.linalg\n- np.fft (is there any point?)\n- np.lib.nanfunctions\n\n\"\"\"\nimport inspect\nimport itertools\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom astropy.utils.compat import NUMPY_LT_1_19, NUMPY_LT_1_20, NUMPY_LT_1_23\nfrom astropy.units.tests.test_quantity_non_ufuncs import (\n get_wrapped_functions)\n\nfrom astropy.utils.masked import Masked, MaskedNDArray\nfrom astropy.utils.masked.function_helpers import (\n MASKED_SAFE_FUNCTIONS,\n APPLY_TO_BOTH_FUNCTIONS,\n DISPATCHED_FUNCTIONS,\n IGNORED_FUNCTIONS,\n UNSUPPORTED_FUNCTIONS)\n\nfrom .test_masked import assert_masked_equal, MaskedArraySetup\n\n\nall_wrapped_functions = get_wrapped_functions(np)\nall_wrapped = set(all_wrapped_functions.values())\n\n\nclass BasicTestSetup(MaskedArraySetup):\n def check(self, func, *args, **kwargs):\n out = func(self.ma, *args, **kwargs)\n expected = Masked(func(self.a, *args, **kwargs),\n mask=func(self.mask_a, *args, **kwargs))\n assert_masked_equal(out, expected)\n\n def check2(self, func, *args, **kwargs):\n out = func(self.ma, self.mb, *args, **kwargs)\n expected = Masked(func(self.a, self.b, *args, **kwargs),\n mask=func(self.mask_a, self.mask_b, *args, **kwargs))\n if isinstance(out, (tuple, list)):\n for o, x in zip(out, expected):\n assert_masked_equal(o, x)\n else:\n assert_masked_equal(out, expected)\n\n\nclass NoMaskTestSetup(MaskedArraySetup):\n def check(self, func, *args, **kwargs):\n o = func(self.ma, *args, **kwargs)\n expected = func(self.a, *args, **kwargs)\n assert_array_equal(o, expected)\n\n\nclass InvariantMaskTestSetup(MaskedArraySetup):\n def check(self, func, *args, **kwargs):\n o = func(self.ma, *args, 
**kwargs)\n expected = func(self.a, *args, **kwargs)\n assert_array_equal(o.unmasked, expected)\n assert_array_equal(o.mask, self.mask_a)\n\n\nclass TestShapeInformation(BasicTestSetup):\n def test_shape(self):\n assert np.shape(self.ma) == (2, 3)\n\n def test_size(self):\n assert np.size(self.ma) == 6\n\n def test_ndim(self):\n assert np.ndim(self.ma) == 2\n\n\nclass TestShapeManipulation(BasicTestSetup):\n # Note: do not parametrize the below, since test names are used\n # to check coverage.\n def test_reshape(self):\n self.check(np.reshape, (6, 1))\n\n def test_ravel(self):\n self.check(np.ravel)\n\n def test_moveaxis(self):\n self.check(np.moveaxis, 0, 1)\n\n def test_rollaxis(self):\n self.check(np.rollaxis, 0, 2)\n\n def test_swapaxes(self):\n self.check(np.swapaxes, 0, 1)\n\n def test_transpose(self):\n self.check(np.transpose)\n\n def test_atleast_1d(self):\n self.check(np.atleast_1d)\n o, so = np.atleast_1d(self.mb[0], self.mc[0])\n assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1,)\n\n def test_atleast_2d(self):\n self.check(np.atleast_2d)\n o, so = np.atleast_2d(self.mb[0], self.mc[0])\n assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1)\n\n def test_atleast_3d(self):\n self.check(np.atleast_3d)\n o, so = np.atleast_3d(self.mb[0], self.mc[0])\n assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1, 1)\n\n def test_expand_dims(self):\n self.check(np.expand_dims, 1)\n\n def test_squeeze(self):\n o = np.squeeze(self.mc)\n assert o.shape == o.mask.shape == (2,)\n assert_array_equal(o.unmasked, self.c.squeeze())\n assert_array_equal(o.mask, self.mask_c.squeeze())\n\n def test_flip(self):\n self.check(np.flip)\n\n def test_fliplr(self):\n self.check(np.fliplr)\n\n def test_flipud(self):\n self.check(np.flipud)\n\n def test_rot90(self):\n self.check(np.rot90)\n\n def test_broadcast_to(self):\n self.check(np.broadcast_to, (3, 2, 3))\n self.check(np.broadcast_to, (3, 2, 3), subok=False)\n\n def 
test_broadcast_arrays(self):\n self.check2(np.broadcast_arrays)\n self.check2(np.broadcast_arrays, subok=False)\n\n\nclass TestArgFunctions(MaskedArraySetup):\n def check(self, function, *args, fill_value=np.nan, **kwargs):\n o = function(self.ma, *args, **kwargs)\n a_filled = self.ma.filled(fill_value=fill_value)\n expected = function(a_filled, *args, **kwargs)\n assert_array_equal(o, expected)\n\n def test_argmin(self):\n self.check(np.argmin, fill_value=np.inf)\n\n def test_argmax(self):\n self.check(np.argmax, fill_value=-np.inf)\n\n def test_argsort(self):\n self.check(np.argsort, fill_value=np.nan)\n\n def test_lexsort(self):\n self.check(np.lexsort, fill_value=np.nan)\n\n def test_nonzero(self):\n self.check(np.nonzero, fill_value=0.)\n\n @pytest.mark.filterwarnings('ignore:Calling nonzero on 0d arrays is deprecated')\n def test_nonzero_0d(self):\n res1 = Masked(1, mask=False).nonzero()\n assert len(res1) == 1\n assert_array_equal(res1[0], np.ones(()).nonzero()[0])\n res2 = Masked(1, mask=True).nonzero()\n assert len(res2) == 1\n assert_array_equal(res2[0], np.zeros(()).nonzero()[0])\n\n def test_argwhere(self):\n self.check(np.argwhere, fill_value=0.)\n\n def test_argpartition(self):\n self.check(np.argpartition, 2, fill_value=np.inf)\n\n def test_flatnonzero(self):\n self.check(np.flatnonzero, fill_value=0.)\n\n\nclass TestAlongAxis(MaskedArraySetup):\n def test_take_along_axis(self):\n indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)\n out = np.take_along_axis(self.ma, indices, axis=0)\n expected = np.take_along_axis(self.a, indices, axis=0)\n expected_mask = np.take_along_axis(self.mask_a, indices, axis=0)\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, expected_mask)\n\n def test_put_along_axis(self):\n ma = self.ma.copy()\n indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)\n np.put_along_axis(ma, indices, axis=0, values=-1)\n expected = self.a.copy()\n np.put_along_axis(expected, indices, axis=0, 
values=-1)\n assert_array_equal(ma.unmasked, expected)\n assert_array_equal(ma.mask, self.mask_a)\n np.put_along_axis(ma, indices, axis=0, values=np.ma.masked)\n assert_array_equal(ma.unmasked, expected)\n expected_mask = self.mask_a.copy()\n np.put_along_axis(expected_mask, indices, axis=0, values=True)\n assert_array_equal(ma.mask, expected_mask)\n\n @pytest.mark.parametrize('axis', (0, 1))\n def test_apply_along_axis(self, axis):\n out = np.apply_along_axis(np.square, axis, self.ma)\n expected = np.apply_along_axis(np.square, axis, self.a)\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, self.mask_a)\n\n @pytest.mark.parametrize('axes', [(1,), 0, (0, -1)])\n def test_apply_over_axes(self, axes):\n def function(x, axis):\n return np.mean(np.square(x), axis)\n\n out = np.apply_over_axes(function, self.ma, axes)\n expected = self.ma\n for axis in (axes if isinstance(axes, tuple) else (axes,)):\n expected = (expected**2).mean(axis, keepdims=True)\n assert_array_equal(out.unmasked, expected.unmasked)\n assert_array_equal(out.mask, expected.mask)\n\n def test_apply_over_axes_no_reduction(self):\n out = np.apply_over_axes(np.cumsum, self.ma, 0)\n expected = self.ma.cumsum(axis=0)\n assert_masked_equal(out, expected)\n\n def test_apply_over_axes_wrong_size(self):\n with pytest.raises(ValueError, match='not.*correct shape'):\n np.apply_over_axes(lambda x, axis: x[..., np.newaxis], self.ma, 0)\n\n\nclass TestIndicesFrom(NoMaskTestSetup):\n @classmethod\n def setup_class(self):\n self.a = np.arange(9).reshape(3, 3)\n self.mask_a = np.eye(3, dtype=bool)\n self.ma = Masked(self.a, self.mask_a)\n\n def test_diag_indices_from(self):\n self.check(np.diag_indices_from)\n\n def test_triu_indices_from(self):\n self.check(np.triu_indices_from)\n\n def test_tril_indices_from(self):\n self.check(np.tril_indices_from)\n\n\nclass TestRealImag(InvariantMaskTestSetup):\n @classmethod\n def setup_class(self):\n self.a = np.array([1+2j, 3+4j])\n self.mask_a = 
np.array([True, False])\n self.ma = Masked(self.a, mask=self.mask_a)\n\n def test_real(self):\n self.check(np.real)\n\n def test_imag(self):\n self.check(np.imag)\n\n\nclass TestCopyAndCreation(InvariantMaskTestSetup):\n def test_copy(self):\n self.check(np.copy)\n # Also as kwarg\n copy = np.copy(a=self.ma)\n assert_array_equal(copy, self.ma)\n\n def test_asfarray(self):\n self.check(np.asfarray)\n farray = np.asfarray(a=self.ma)\n assert_array_equal(farray, self.ma)\n\n\nclass TestArrayCreation(MaskedArraySetup):\n def test_empty_like(self):\n o = np.empty_like(self.ma)\n assert o.shape == (2, 3)\n assert isinstance(o, Masked)\n assert isinstance(o, np.ndarray)\n o2 = np.empty_like(prototype=self.ma)\n assert o2.shape == (2, 3)\n assert isinstance(o2, Masked)\n assert isinstance(o2, np.ndarray)\n o3 = np.empty_like(self.ma, subok=False)\n assert type(o3) is MaskedNDArray\n\n def test_zeros_like(self):\n o = np.zeros_like(self.ma)\n assert_array_equal(o.unmasked, np.zeros_like(self.a))\n assert_array_equal(o.mask, np.zeros_like(self.mask_a))\n o2 = np.zeros_like(a=self.ma)\n assert_array_equal(o2.unmasked, np.zeros_like(self.a))\n assert_array_equal(o2.mask, np.zeros_like(self.mask_a))\n\n def test_ones_like(self):\n o = np.ones_like(self.ma)\n assert_array_equal(o.unmasked, np.ones_like(self.a))\n assert_array_equal(o.mask, np.zeros_like(self.mask_a))\n o2 = np.ones_like(a=self.ma)\n assert_array_equal(o2.unmasked, np.ones_like(self.a))\n assert_array_equal(o2.mask, np.zeros_like(self.mask_a))\n\n @pytest.mark.parametrize('value', [0.5, Masked(0.5, mask=True), np.ma.masked])\n def test_full_like(self, value):\n o = np.full_like(self.ma, value)\n if value is np.ma.masked:\n expected = Masked(o.unmasked, True)\n else:\n expected = Masked(np.empty_like(self.a))\n expected[...] 
= value\n assert_array_equal(o.unmasked, expected.unmasked)\n assert_array_equal(o.mask, expected.mask)\n\n\nclass TestAccessingParts(BasicTestSetup):\n def test_diag(self):\n self.check(np.diag)\n\n def test_diag_1d_input(self):\n ma = self.ma.ravel()\n o = np.diag(ma)\n assert_array_equal(o.unmasked, np.diag(self.a.ravel()))\n assert_array_equal(o.mask, np.diag(self.mask_a.ravel()))\n\n def test_diagonal(self):\n self.check(np.diagonal)\n\n def test_diagflat(self):\n self.check(np.diagflat)\n\n def test_compress(self):\n o = np.compress([True, False], self.ma, axis=0)\n expected = np.compress([True, False], self.a, axis=0)\n expected_mask = np.compress([True, False], self.mask_a, axis=0)\n assert_array_equal(o.unmasked, expected)\n assert_array_equal(o.mask, expected_mask)\n\n def test_extract(self):\n o = np.extract([True, False, True], self.ma)\n expected = np.extract([True, False, True], self.a)\n expected_mask = np.extract([True, False, True], self.mask_a)\n assert_array_equal(o.unmasked, expected)\n assert_array_equal(o.mask, expected_mask)\n\n def test_delete(self):\n self.check(np.delete, slice(1, 2), 0)\n self.check(np.delete, [0, 2], 1)\n\n def test_roll(self):\n self.check(np.roll, 1)\n self.check(np.roll, 1, axis=0)\n\n def test_take(self):\n self.check(np.take, [0, 1], axis=1)\n self.check(np.take, 1)\n\n\nclass TestSettingParts(MaskedArraySetup):\n def test_put(self):\n ma = self.ma.copy()\n v = Masked([50, 150], [False, True])\n np.put(ma, [0, 2], v)\n expected = self.a.copy()\n np.put(expected, [0, 2], [50, 150])\n expected_mask = self.mask_a.copy()\n np.put(expected_mask, [0, 2], [False, True])\n assert_array_equal(ma.unmasked, expected)\n assert_array_equal(ma.mask, expected_mask)\n\n with pytest.raises(TypeError):\n # Indices cannot be masked.\n np.put(ma, Masked([0, 2]), v)\n\n with pytest.raises(TypeError):\n # Array to put masked values in must be masked.\n np.put(self.a.copy(), [0, 2], v)\n\n def test_putmask(self):\n ma = 
self.ma.flatten()\n mask = [True, False, False, False, True, False]\n values = Masked(np.arange(100, 650, 100),\n mask=[False, True, True, True, False, False])\n np.putmask(ma, mask, values)\n expected = self.a.flatten()\n np.putmask(expected, mask, values.unmasked)\n expected_mask = self.mask_a.flatten()\n np.putmask(expected_mask, mask, values.mask)\n assert_array_equal(ma.unmasked, expected)\n assert_array_equal(ma.mask, expected_mask)\n\n with pytest.raises(TypeError):\n np.putmask(self.a.flatten(), mask, values)\n\n def test_place(self):\n ma = self.ma.flatten()\n mask = [True, False, False, False, True, False]\n values = Masked([100, 200], mask=[False, True])\n np.place(ma, mask, values)\n expected = self.a.flatten()\n np.place(expected, mask, values.unmasked)\n expected_mask = self.mask_a.flatten()\n np.place(expected_mask, mask, values.mask)\n assert_array_equal(ma.unmasked, expected)\n assert_array_equal(ma.mask, expected_mask)\n\n with pytest.raises(TypeError):\n np.place(self.a.flatten(), mask, values)\n\n def test_copyto(self):\n ma = self.ma.flatten()\n mask = [True, False, False, False, True, False]\n values = Masked(np.arange(100, 650, 100),\n mask=[False, True, True, True, False, False])\n np.copyto(ma, values, where=mask)\n expected = self.a.flatten()\n np.copyto(expected, values.unmasked, where=mask)\n expected_mask = self.mask_a.flatten()\n np.copyto(expected_mask, values.mask, where=mask)\n assert_array_equal(ma.unmasked, expected)\n assert_array_equal(ma.mask, expected_mask)\n\n with pytest.raises(TypeError):\n np.copyto(self.a.flatten(), values, where=mask)\n\n @pytest.mark.parametrize('value', [0.25, np.ma.masked])\n def test_fill_diagonal(self, value):\n ma = self.ma[:2, :2].copy()\n np.fill_diagonal(ma, value)\n expected = ma.copy()\n expected[np.diag_indices_from(expected)] = value\n assert_array_equal(ma.unmasked, expected.unmasked)\n assert_array_equal(ma.mask, expected.mask)\n\n\nclass TestRepeat(BasicTestSetup):\n def 
test_tile(self):\n self.check(np.tile, 2)\n\n def test_repeat(self):\n self.check(np.repeat, 2)\n\n def test_resize(self):\n self.check(np.resize, (4, 4))\n\n\nclass TestConcatenate(MaskedArraySetup):\n # More tests at TestMaskedArrayConcatenation in test_functions.\n def check(self, func, *args, **kwargs):\n ma_list = kwargs.pop('ma_list', [self.ma, self.ma])\n a_list = [Masked(ma).unmasked for ma in ma_list]\n m_list = [Masked(ma).mask for ma in ma_list]\n o = func(ma_list, *args, **kwargs)\n expected = func(a_list, *args, **kwargs)\n expected_mask = func(m_list, *args, **kwargs)\n assert_array_equal(o.unmasked, expected)\n assert_array_equal(o.mask, expected_mask)\n\n def test_concatenate(self):\n self.check(np.concatenate)\n self.check(np.concatenate, axis=1)\n self.check(np.concatenate, ma_list=[self.a, self.ma])\n if not NUMPY_LT_1_20:\n # Check that we can accept a dtype argument (introduced in numpy 1.20)\n self.check(np.concatenate, dtype='f4')\n\n out = Masked(np.empty((4, 3)))\n result = np.concatenate([self.ma, self.ma], out=out)\n assert out is result\n expected = np.concatenate([self.a, self.a])\n expected_mask = np.concatenate([self.mask_a, self.mask_a])\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, expected_mask)\n\n with pytest.raises(TypeError):\n np.concatenate([self.ma, self.ma], out=np.empty((4, 3)))\n\n def test_stack(self):\n self.check(np.stack)\n\n def test_column_stack(self):\n self.check(np.column_stack)\n\n def test_hstack(self):\n self.check(np.hstack)\n\n def test_vstack(self):\n self.check(np.vstack)\n\n def test_dstack(self):\n self.check(np.dstack)\n\n def test_block(self):\n self.check(np.block)\n\n out = np.block([[0., Masked(1., True)],\n [Masked(1, False), Masked(2, False)]])\n expected = np.array([[0, 1.], [1, 2]])\n expected_mask = np.array([[False, True], [False, False]])\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, expected_mask)\n\n def test_append(self):\n out = 
np.append(self.ma, self.mc, axis=1)\n expected = np.append(self.a, self.c, axis=1)\n expected_mask = np.append(self.mask_a, self.mask_c, axis=1)\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, expected_mask)\n\n def test_insert(self):\n obj = (1, 1)\n values = Masked([50., 25.], mask=[True, False])\n out = np.insert(self.ma.flatten(), obj, values)\n expected = np.insert(self.a.flatten(), obj, [50., 25.])\n expected_mask = np.insert(self.mask_a.flatten(), obj, [True, False])\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, expected_mask)\n\n with pytest.raises(TypeError):\n np.insert(self.a.flatten(), obj, values)\n\n with pytest.raises(TypeError):\n np.insert(self.ma.flatten(), Masked(obj), values)\n\n\nclass TestSplit:\n @classmethod\n def setup_class(self):\n self.a = np.arange(54.).reshape(3, 3, 6)\n self.mask_a = np.zeros(self.a.shape, dtype=bool)\n self.mask_a[1, 1, 1] = True\n self.mask_a[0, 1, 4] = True\n self.mask_a[1, 2, 5] = True\n self.ma = Masked(self.a, mask=self.mask_a)\n\n def check(self, func, *args, **kwargs):\n out = func(self.ma, *args, **kwargs)\n expected = func(self.a, *args, **kwargs)\n expected_mask = func(self.mask_a, *args, **kwargs)\n assert len(out) == len(expected)\n for o, x, xm in zip(out, expected, expected_mask):\n assert_array_equal(o.unmasked, x)\n assert_array_equal(o.mask, xm)\n\n def test_split(self):\n self.check(np.split, [1])\n\n def test_array_split(self):\n self.check(np.array_split, 2)\n\n def test_hsplit(self):\n self.check(np.hsplit, [1, 4])\n\n def test_vsplit(self):\n self.check(np.vsplit, [1])\n\n def test_dsplit(self):\n self.check(np.dsplit, [1])\n\n\nclass TestMethodLikes(MaskedArraySetup):\n def check(self, function, *args, method=None, **kwargs):\n if method is None:\n method = function.__name__\n\n o = function(self.ma, *args, **kwargs)\n x = getattr(self.ma, method)(*args, **kwargs)\n assert_masked_equal(o, x)\n\n def test_amax(self):\n self.check(np.amax, 
method='max')\n\n def test_amin(self):\n self.check(np.amin, method='min')\n\n def test_sum(self):\n self.check(np.sum)\n\n def test_cumsum(self):\n self.check(np.cumsum)\n\n def test_any(self):\n self.check(np.any)\n\n def test_all(self):\n self.check(np.all)\n\n def test_sometrue(self):\n self.check(np.sometrue, method='any')\n\n def test_alltrue(self):\n self.check(np.alltrue, method='all')\n\n def test_prod(self):\n self.check(np.prod)\n\n def test_product(self):\n self.check(np.product, method='prod')\n\n def test_cumprod(self):\n self.check(np.cumprod)\n\n def test_cumproduct(self):\n self.check(np.cumproduct, method='cumprod')\n\n def test_ptp(self):\n self.check(np.ptp)\n self.check(np.ptp, axis=0)\n\n def test_round_(self):\n self.check(np.round_, method='round')\n\n def test_around(self):\n self.check(np.around, method='round')\n\n def test_clip(self):\n self.check(np.clip, 2., 4.)\n self.check(np.clip, self.mb, self.mc)\n\n def test_mean(self):\n self.check(np.mean)\n\n def test_std(self):\n self.check(np.std)\n\n def test_var(self):\n self.check(np.var)\n\n\nclass TestUfuncLike(InvariantMaskTestSetup):\n def test_fix(self):\n self.check(np.fix)\n\n def test_angle(self):\n a = np.array([1+0j, 0+1j, 1+1j, 0+0j])\n mask_a = np.array([True, False, True, False])\n ma = Masked(a, mask=mask_a)\n out = np.angle(ma)\n expected = np.angle(ma.unmasked)\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, mask_a)\n\n def test_i0(self):\n self.check(np.i0)\n\n def test_sinc(self):\n self.check(np.sinc)\n\n def test_where(self):\n mask = [True, False, True]\n out = np.where(mask, self.ma, 1000.)\n expected = np.where(mask, self.a, 1000.)\n expected_mask = np.where(mask, self.mask_a, False)\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, expected_mask)\n\n mask2 = Masked(mask, [True, False, False])\n out2 = np.where(mask2, self.ma, 1000.)\n expected2 = np.where(mask, self.a, 1000.)\n expected_mask2 = np.where(mask, 
self.mask_a, False) | mask2.mask\n assert_array_equal(out2.unmasked, expected2)\n assert_array_equal(out2.mask, expected_mask2)\n\n def test_where_single_arg(self):\n m = Masked(np.arange(3), mask=[True, False, False])\n out = np.where(m)\n expected = m.nonzero()\n assert isinstance(out, tuple) and len(out) == 1\n assert_array_equal(out[0], expected[0])\n\n def test_where_wrong_number_of_arg(self):\n with pytest.raises(ValueError, match='either both or neither'):\n np.where([True, False, False], self.a)\n\n def test_choose(self):\n a = np.array([0, 1]).reshape((2, 1))\n result = np.choose(a, (self.ma, self.mb))\n expected = np.choose(a, (self.a, self.b))\n expected_mask = np.choose(a, (self.mask_a, self.mask_b))\n assert_array_equal(result.unmasked, expected)\n assert_array_equal(result.mask, expected_mask)\n\n out = np.zeros_like(result)\n result2 = np.choose(a, (self.ma, self.mb), out=out)\n assert result2 is out\n assert_array_equal(result2, result)\n\n with pytest.raises(TypeError):\n np.choose(a, (self.ma, self.mb), out=np.zeros_like(expected))\n\n def test_choose_masked(self):\n ma = Masked(np.array([-1, 1]), mask=[True, False]).reshape((2, 1))\n out = ma.choose((self.ma, self.mb))\n expected = np.choose(ma.filled(0), (self.a, self.b))\n expected_mask = np.choose(ma.filled(0), (self.mask_a, self.mask_b)) | ma.mask\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, expected_mask)\n\n with pytest.raises(ValueError):\n ma.unmasked.choose((self.ma, self.mb))\n\n @pytest.mark.parametrize('default', [-1., np.ma.masked, Masked(-1, mask=True)])\n def test_select(self, default):\n a, mask_a, ma = self.a, self.mask_a, self.ma\n out = np.select([a < 1.5, a > 3.5], [ma, ma+1], default=default)\n expected = np.select([a < 1.5, a > 3.5], [a, a+1],\n default=-1 if default is not np.ma.masked else 0)\n expected_mask = np.select([a < 1.5, a > 3.5], [mask_a, mask_a],\n default=getattr(default, 'mask', False))\n assert_array_equal(out.unmasked, 
expected)\n assert_array_equal(out.mask, expected_mask)\n\n def test_real_if_close(self):\n a = np.array([1+0j, 0+1j, 1+1j, 0+0j])\n mask_a = np.array([True, False, True, False])\n ma = Masked(a, mask=mask_a)\n out = np.real_if_close(ma)\n expected = np.real_if_close(a)\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, mask_a)\n\n def test_tril(self):\n self.check(np.tril)\n\n def test_triu(self):\n self.check(np.triu)\n\n def test_unwrap(self):\n self.check(np.unwrap)\n\n def test_nan_to_num(self):\n self.check(np.nan_to_num)\n ma = Masked([np.nan, 1.], mask=[True, False])\n o = np.nan_to_num(ma, copy=False)\n assert_masked_equal(o, Masked([0., 1.], mask=[True, False]))\n assert ma is o\n\n\nclass TestUfuncLikeTests:\n @classmethod\n def setup_class(self):\n self.a = np.array([[-np.inf, +np.inf, np.nan, 3., 4.]]*2)\n self.mask_a = np.array([[False]*5, [True]*4+[False]])\n self.ma = Masked(self.a, mask=self.mask_a)\n self.b = np.array([[3.0001], [3.9999]])\n self.mask_b = np.array([[True], [False]])\n self.mb = Masked(self.b, mask=self.mask_b)\n\n def check(self, func):\n out = func(self.ma)\n expected = func(self.a)\n assert type(out) is MaskedNDArray\n assert out.dtype.kind == 'b'\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, self.mask_a)\n assert not np.may_share_memory(out.mask, self.mask_a)\n\n def test_isposinf(self):\n self.check(np.isposinf)\n\n def test_isneginf(self):\n self.check(np.isneginf)\n\n def test_isreal(self):\n self.check(np.isreal)\n o = np.isreal(Masked([1. + 1j], mask=False))\n assert not o.unmasked and not o.mask\n o = np.isreal(Masked([1. + 1j], mask=True))\n assert not o.unmasked and o.mask\n\n def test_iscomplex(self):\n self.check(np.iscomplex)\n o = np.iscomplex(Masked([1. + 1j], mask=False))\n assert o.unmasked and not o.mask\n o = np.iscomplex(Masked([1. 
+ 1j], mask=True))\n assert o.unmasked and o.mask\n\n def test_isclose(self):\n out = np.isclose(self.ma, self.mb, atol=0.01)\n expected = np.isclose(self.ma, self.mb, atol=0.01)\n expected_mask = self.mask_a | self.mask_b\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, expected_mask)\n\n def test_allclose(self):\n out = np.allclose(self.ma, self.mb, atol=0.01)\n expected = np.isclose(self.ma, self.mb,\n atol=0.01)[self.mask_a | self.mask_b].all()\n assert_array_equal(out, expected)\n\n def test_array_equal(self):\n assert not np.array_equal(self.ma, self.ma)\n assert not np.array_equal(self.ma, self.a)\n if not NUMPY_LT_1_19:\n assert np.array_equal(self.ma, self.ma, equal_nan=True)\n assert np.array_equal(self.ma, self.a, equal_nan=True)\n assert not np.array_equal(self.ma, self.mb)\n ma2 = self.ma.copy()\n ma2.mask |= np.isnan(self.a)\n assert np.array_equal(ma2, self.ma)\n\n def test_array_equiv(self):\n assert np.array_equiv(self.mb, self.mb)\n assert np.array_equiv(self.mb, self.b)\n assert not np.array_equiv(self.ma, self.mb)\n assert np.array_equiv(self.mb, np.stack([self.mb, self.mb]))\n\n\nclass TestOuterLikeFunctions(MaskedArraySetup):\n def test_outer(self):\n result = np.outer(self.ma, self.mb)\n expected_data = np.outer(self.a.ravel(), self.b.ravel())\n expected_mask = np.logical_or.outer(self.mask_a.ravel(),\n self.mask_b.ravel())\n assert_array_equal(result.unmasked, expected_data)\n assert_array_equal(result.mask, expected_mask)\n\n out = np.zeros_like(result)\n result2 = np.outer(self.ma, self.mb, out=out)\n assert result2 is out\n assert result2 is not result\n assert_masked_equal(result2, result)\n\n out2 = np.zeros_like(result.unmasked)\n with pytest.raises(TypeError):\n np.outer(self.ma, self.mb, out=out2)\n\n def test_kron(self):\n result = np.kron(self.ma, self.mb)\n expected_data = np.kron(self.a, self.b)\n expected_mask = np.logical_or.outer(self.mask_a,\n self.mask_b).reshape(result.shape)\n 
assert_array_equal(result.unmasked, expected_data)\n assert_array_equal(result.mask, expected_mask)\n\n\nclass TestReductionLikeFunctions(MaskedArraySetup):\n def test_average(self):\n o = np.average(self.ma)\n assert_masked_equal(o, self.ma.mean())\n\n o = np.average(self.ma, weights=self.mb, axis=-1)\n expected = np.average(self.a, weights=self.b, axis=-1)\n expected_mask = (self.mask_a | self.mask_b).any(-1)\n assert_array_equal(o.unmasked, expected)\n assert_array_equal(o.mask, expected_mask)\n\n def test_trace(self):\n o = np.trace(self.ma)\n expected = np.trace(self.a)\n expected_mask = np.trace(self.mask_a).astype(bool)\n assert_array_equal(o.unmasked, expected)\n assert_array_equal(o.mask, expected_mask)\n\n @pytest.mark.parametrize('axis', [0, 1, None])\n def test_count_nonzero(self, axis):\n o = np.count_nonzero(self.ma, axis=axis)\n expected = np.count_nonzero(self.ma.filled(0), axis=axis)\n assert_array_equal(o, expected)\n\n\n@pytest.mark.filterwarnings('ignore:all-nan')\nclass TestPartitionLikeFunctions:\n @classmethod\n def setup_class(self):\n self.a = np.arange(36.).reshape(6, 6)\n self.mask_a = np.zeros_like(self.a, bool)\n # On purpose fill diagonal, so we get all masked elements.\n self.mask_a[np.tril_indices_from(self.a)] = True\n self.ma = Masked(self.a, mask=self.mask_a)\n\n def check(self, function, *args, **kwargs):\n o = function(self.ma, *args, **kwargs)\n nanfunc = getattr(np, 'nan'+function.__name__)\n nanfilled = self.ma.filled(np.nan)\n expected = nanfunc(nanfilled, *args, **kwargs)\n assert_array_equal(o.filled(np.nan), expected)\n assert_array_equal(o.mask, np.isnan(expected))\n\n if not kwargs.get('axis', 1):\n # no need to test for all\n return\n\n out = np.zeros_like(o)\n o2 = function(self.ma, *args, out=out, **kwargs)\n assert o2 is out\n assert_masked_equal(o2, o)\n with pytest.raises(TypeError):\n function(self.ma, *args, out=np.zeros_like(expected), **kwargs)\n\n @pytest.mark.parametrize('axis', [None, 0, 1])\n def 
test_median(self, axis):\n self.check(np.median, axis=axis)\n\n @pytest.mark.parametrize('axis', [None, 0, 1])\n def test_quantile(self, axis):\n self.check(np.quantile, q=[0.25, 0.5], axis=axis)\n\n def test_quantile_out_of_range(self):\n with pytest.raises(ValueError, match='must be in the range'):\n np.quantile(self.ma, q=1.5)\n\n @pytest.mark.parametrize('axis', [None, 0, 1])\n def test_percentile(self, axis):\n self.check(np.percentile, q=50, axis=axis)\n\n\nclass TestIntDiffFunctions(MaskedArraySetup):\n def test_diff(self):\n out = np.diff(self.ma)\n expected = np.diff(self.a)\n expected_mask = self.mask_a[:, 1:] | self.mask_a[:, :-1]\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, expected_mask)\n\n def test_diff_prepend_append(self):\n out = np.diff(self.ma, prepend=Masked(-1, mask=True), append=1)\n expected = np.diff(self.a, prepend=-1, append=1.)\n mask = np.concatenate([np.ones((2, 1), bool),\n self.mask_a,\n np.zeros((2, 1), bool)], axis=-1)\n expected_mask = mask[:, 1:] | mask[:, :-1]\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, expected_mask)\n\n def test_trapz(self):\n ma = self.ma.copy()\n ma.mask[1] = False\n out = np.trapz(ma)\n assert_array_equal(out.unmasked, np.trapz(self.a))\n assert_array_equal(out.mask, np.array([True, False]))\n\n def test_gradient(self):\n out = np.gradient(self.ma)\n expected = np.gradient(self.a)\n expected_mask = [(self.mask_a[1:] | self.mask_a[:-1]).repeat(2, axis=0),\n np.stack([\n self.mask_a[:, 0] | self.mask_a[:, 1],\n self.mask_a[:, 0] | self.mask_a[:, 2],\n self.mask_a[:, 1] | self.mask_a[:, 2]], axis=-1)]\n\n for o, x, m in zip(out, expected, expected_mask):\n assert_array_equal(o.unmasked, x)\n assert_array_equal(o.mask, m)\n\n\nclass TestSpaceFunctions:\n @classmethod\n def setup_class(self):\n self.a = np.arange(1., 7.).reshape(2, 3)\n self.mask_a = np.array([[True, False, False],\n [False, True, False]])\n self.ma = Masked(self.a, 
mask=self.mask_a)\n self.b = np.array([2.5, 10., 3.])\n self.mask_b = np.array([False, True, False])\n self.mb = Masked(self.b, mask=self.mask_b)\n\n def check(self, function, *args, **kwargs):\n out = function(self.ma, self.mb, 5)\n expected = function(self.a, self.b, 5)\n expected_mask = np.broadcast_to(self.mask_a | self.mask_b,\n expected.shape).copy()\n # TODO: make implementation that also ensures start point mask is\n # determined just by start point? (as for geomspace in numpy 1.20)?\n expected_mask[-1] = self.mask_b\n if not NUMPY_LT_1_20 and function is np.geomspace:\n expected_mask[0] = self.mask_a\n\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, expected_mask)\n\n def test_linspace(self):\n self.check(np.linspace, 5)\n\n def test_logspace(self):\n self.check(np.logspace, 10)\n\n def test_geomspace(self):\n self.check(np.geomspace, 5)\n\n\nclass TestInterpolationFunctions(MaskedArraySetup):\n def test_interp(self):\n xp = np.arange(5.)\n fp = np.array([1., 5., 6., 19., 20.])\n mask_fp = np.array([False, False, False, True, False])\n mfp = Masked(fp, mask=mask_fp)\n x = np.array([1.5, 17.])\n mask_x = np.array([False, True])\n mx = Masked(x, mask=mask_x)\n out = np.interp(mx, xp, mfp)\n expected = np.interp(x, xp[~mask_fp], fp[~mask_fp])\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, mask_x)\n\n def test_piecewise(self):\n condlist = [self.a < 1, self.a >= 1]\n out = np.piecewise(self.ma, condlist, [Masked(-1, mask=True), 1.])\n expected = np.piecewise(self.a, condlist, [-1, 1.])\n expected_mask = np.piecewise(self.mask_a, condlist, [True, False])\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, expected_mask)\n condlist2 = [self.a < 1, self.a >= 3]\n out2 = np.piecewise(self.ma, condlist2,\n [Masked(-1, True), 1, lambda x: Masked(np.full_like(x, 2.),\n mask=~x.mask)])\n expected = np.piecewise(self.a, condlist2, [-1, 1, 2])\n expected_mask = np.piecewise(self.mask_a, 
condlist2,\n [True, False, lambda x: ~x])\n assert_array_equal(out2.unmasked, expected)\n assert_array_equal(out2.mask, expected_mask)\n\n with pytest.raises(ValueError, match='with 2 condition'):\n np.piecewise(self.ma, condlist2, [])\n\n def test_regression_12978(self):\n \"\"\"Regression tests for https://github.com/astropy/astropy/pull/12978\"\"\"\n # This case produced incorrect results\n mask = [False, True, False]\n x = np.array([1, 2, 3])\n xp = Masked(np.array([1, 2, 3]), mask=mask)\n fp = Masked(np.array([1, 2, 3]), mask=mask)\n result = np.interp(x, xp, fp)\n assert_array_equal(result, x)\n\n # This case raised a ValueError\n xp = np.array([1, 3])\n fp = Masked(np.array([1, 3]))\n result = np.interp(x, xp, fp)\n assert_array_equal(result, x)\n\n\nclass TestBincount(MaskedArraySetup):\n def test_bincount(self):\n i = np.array([1, 1, 2, 3, 2, 4])\n mask_i = np.array([True, False, False, True, False, False])\n mi = Masked(i, mask=mask_i)\n out = np.bincount(mi)\n expected = np.bincount(i[~mask_i])\n assert_array_equal(out, expected)\n w = np.arange(len(i))\n mask_w = np.array([True]+[False]*5)\n mw = Masked(w, mask=mask_w)\n out2 = np.bincount(i, mw)\n expected = np.bincount(i, w)\n expected_mask = np.array([False, True, False, False, False])\n assert_array_equal(out2.unmasked, expected)\n assert_array_equal(out2.mask, expected_mask)\n\n out3 = np.bincount(mi, mw)\n expected = np.bincount(i[~mask_i], w[~mask_i])\n expected_mask = np.array([False, False, False, False, False])\n assert_array_equal(out3.unmasked, expected)\n assert_array_equal(out3.mask, expected_mask)\n\n\nclass TestSortFunctions(MaskedArraySetup):\n def test_sort(self):\n o = np.sort(self.ma)\n expected = self.ma.copy()\n expected.sort()\n assert_masked_equal(o, expected)\n\n def test_sort_complex(self):\n ma = Masked(np.array([1+2j, 0+4j, 3+0j, -1-1j]),\n mask=[True, False, False, False])\n o = np.sort_complex(ma)\n indx = np.lexsort((ma.unmasked.imag, ma.unmasked.real, ma.mask))\n expected 
= ma[indx]\n assert_masked_equal(o, expected)\n\n def test_msort(self):\n o = np.msort(self.ma)\n expected = np.sort(self.ma, axis=0)\n assert_masked_equal(o, expected)\n\n def test_partition(self):\n o = np.partition(self.ma, 1)\n expected = self.ma.copy()\n expected.partition(1)\n assert_masked_equal(o, expected)\n\n\nclass TestStringFunctions:\n # More elaborate tests done in test_masked.py\n @classmethod\n def setup_class(self):\n self.ma = Masked(np.arange(3), mask=[True, False, False])\n\n def test_array2string(self):\n out0 = np.array2string(self.ma)\n assert out0 == '[— 1 2]'\n # Arguments are interpreted as usual.\n out1 = np.array2string(self.ma, separator=', ')\n assert out1 == '[—, 1, 2]'\n # If we do pass in a formatter, though, it should be used.\n out2 = np.array2string(self.ma, separator=', ', formatter={'all': hex})\n assert out2 == '[———, 0x1, 0x2]'\n # Also as positional argument (no, nobody will do this!)\n out3 = np.array2string(self.ma, None, None, None, ', ', '',\n np._NoValue, {'int': hex})\n assert out3 == out2\n # But not if the formatter is not relevant for us.\n out4 = np.array2string(self.ma, separator=', ', formatter={'float': hex})\n assert out4 == out1\n\n def test_array_repr(self):\n out = np.array_repr(self.ma)\n assert out == 'MaskedNDArray([—, 1, 2])'\n ma2 = self.ma.astype('f4')\n out2 = np.array_repr(ma2)\n assert out2 == 'MaskedNDArray([——, 1., 2.], dtype=float32)'\n\n def test_array_str(self):\n out = np.array_str(self.ma)\n assert out == '[— 1 2]'\n\n\nclass TestBitFunctions:\n @classmethod\n def setup_class(self):\n self.a = np.array([15, 255, 0], dtype='u1')\n self.mask_a = np.array([False, True, False])\n self.ma = Masked(self.a, mask=self.mask_a)\n self.b = np.unpackbits(self.a).reshape(6, 4)\n self.mask_b = np.array([False]*15 + [True, True] + [False]*7).reshape(6, 4)\n self.mb = Masked(self.b, mask=self.mask_b)\n\n @pytest.mark.parametrize('axis', [None, 1, 0])\n def test_packbits(self, axis):\n out = 
np.packbits(self.mb, axis=axis)\n if axis is None:\n expected = self.a\n else:\n expected = np.packbits(self.b, axis=axis)\n expected_mask = np.packbits(self.mask_b, axis=axis) > 0\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, expected_mask)\n\n def test_unpackbits(self):\n out = np.unpackbits(self.ma)\n mask = np.where(self.mask_a, np.uint8(255), np.uint8(0))\n expected_mask = np.unpackbits(mask) > 0\n assert_array_equal(out.unmasked, self.b.ravel())\n assert_array_equal(out.mask, expected_mask)\n\n\nclass TestIndexFunctions(MaskedArraySetup):\n \"\"\"Does not seem much sense to support these...\"\"\"\n def test_unravel_index(self):\n with pytest.raises(TypeError):\n np.unravel_index(self.ma, 3)\n\n def test_ravel_multi_index(self):\n with pytest.raises(TypeError):\n np.ravel_multi_index((self.ma,), 3)\n\n def test_ix_(self):\n with pytest.raises(TypeError):\n np.ix_(self.ma)\n\n\nclass TestDtypeFunctions(MaskedArraySetup):\n def check(self, function, *args, **kwargs):\n out = function(self.ma, *args, **kwargs)\n expected = function(self.a, *args, **kwargs)\n assert out == expected\n\n def test_common_type(self):\n self.check(np.common_type)\n\n def test_result_type(self):\n self.check(np.result_type)\n\n def test_can_cast(self):\n self.check(np.can_cast, self.a.dtype)\n self.check(np.can_cast, 'f4')\n\n def test_min_scalar_type(self):\n out = np.min_scalar_type(self.ma[0, 0])\n expected = np.min_scalar_type(self.a[0, 0])\n assert out == expected\n\n def test_iscomplexobj(self):\n self.check(np.iscomplexobj)\n\n def test_isrealobj(self):\n self.check(np.isrealobj)\n\n\nclass TestMeshGrid(MaskedArraySetup):\n def test_meshgrid(self):\n a = np.arange(1., 4.)\n mask_a = np.array([True, False, False])\n ma = Masked(a, mask=mask_a)\n b = np.array([2.5, 10., 3., 4.])\n mask_b = np.array([False, True, False, True])\n mb = Masked(b, mask=mask_b)\n oa, ob = np.meshgrid(ma, mb)\n xa, xb = np.broadcast_arrays(a, b[:, np.newaxis])\n ma, mb = 
np.broadcast_arrays(mask_a, mask_b[:, np.newaxis])\n for o, x, m in ((oa, xa, ma), (ob, xb, mb)):\n assert_array_equal(o.unmasked, x)\n assert_array_equal(o.mask, m)\n\n\nclass TestMemoryFunctions(MaskedArraySetup):\n def test_shares_memory(self):\n assert np.shares_memory(self.ma, self.ma.unmasked)\n assert not np.shares_memory(self.ma, self.ma.mask)\n\n def test_may_share_memory(self):\n assert np.may_share_memory(self.ma, self.ma.unmasked)\n assert not np.may_share_memory(self.ma, self.ma.mask)\n\n\nclass TestDatetimeFunctions:\n # Could in principle support np.is_busday, np.busday_count, np.busday_offset.\n @classmethod\n def setup_class(self):\n self.a = np.array(['2020-12-31', '2021-01-01', '2021-01-02'], dtype='M')\n self.mask_a = np.array([False, True, False])\n self.ma = Masked(self.a, mask=self.mask_a)\n self.b = np.array([['2021-01-07'], ['2021-01-31']], dtype='M')\n self.mask_b = np.array([[False], [True]])\n self.mb = Masked(self.b, mask=self.mask_b)\n\n def test_datetime_as_string(self):\n out = np.datetime_as_string(self.ma)\n expected = np.datetime_as_string(self.a)\n assert_array_equal(out.unmasked, expected)\n assert_array_equal(out.mask, self.mask_a)\n\n\n@pytest.mark.filterwarnings('ignore:all-nan')\nclass TestNaNFunctions:\n def setup_class(self):\n self.a = np.array([[np.nan, np.nan, 3.],\n [4., 5., 6.]])\n self.mask_a = np.array([[True, False, False],\n [False, True, False]])\n self.b = np.arange(1, 7).reshape(2, 3)\n self.mask_b = self.mask_a\n self.ma = Masked(self.a, mask=self.mask_a)\n self.mb = Masked(self.b, mask=self.mask_b)\n\n def check(self, function, exact_fill_value=None, masked_result=True,\n **kwargs):\n result = function(self.ma, **kwargs)\n expected_data = function(self.ma.filled(np.nan), **kwargs)\n expected_mask = np.isnan(expected_data)\n if masked_result:\n assert isinstance(result, Masked)\n assert_array_equal(result.mask, expected_mask)\n assert np.all(result == expected_data)\n else:\n assert not isinstance(result, 
Masked)\n assert_array_equal(result, expected_data)\n assert not np.any(expected_mask)\n out = np.zeros_like(result)\n result2 = function(self.ma, out=out, **kwargs)\n assert result2 is out\n assert_array_equal(result2, result)\n\n def check_arg(self, function, **kwargs):\n # arg functions do not have an 'out' argument, so just test directly.\n result = function(self.ma, **kwargs)\n assert not isinstance(result, Masked)\n expected = function(self.ma.filled(np.nan), **kwargs)\n assert_array_equal(result, expected)\n\n def test_nanmin(self):\n self.check(np.nanmin)\n self.check(np.nanmin, axis=0)\n self.check(np.nanmin, axis=1)\n resi = np.nanmin(self.mb, axis=1)\n assert_array_equal(resi.unmasked, np.array([2, 4]))\n assert_array_equal(resi.mask, np.array([False, False]))\n\n def test_nanmax(self):\n self.check(np.nanmax)\n\n def test_nanargmin(self):\n self.check_arg(np.nanargmin)\n self.check_arg(np.nanargmin, axis=1)\n\n def test_nanargmax(self):\n self.check_arg(np.nanargmax)\n\n def test_nansum(self):\n self.check(np.nansum, masked_result=False)\n resi = np.nansum(self.mb, axis=1)\n assert not isinstance(resi, Masked)\n assert_array_equal(resi, np.array([5, 10]))\n\n def test_nanprod(self):\n self.check(np.nanprod, masked_result=False)\n resi = np.nanprod(self.mb, axis=1)\n assert not isinstance(resi, Masked)\n assert_array_equal(resi, np.array([6, 24]))\n\n def test_nancumsum(self):\n self.check(np.nancumsum, masked_result=False)\n resi = np.nancumsum(self.mb, axis=1)\n assert not isinstance(resi, Masked)\n assert_array_equal(resi, np.array([[0, 2, 5], [4, 4, 10]]))\n\n def test_nancumprod(self):\n self.check(np.nancumprod, masked_result=False)\n resi = np.nancumprod(self.mb, axis=1)\n assert not isinstance(resi, Masked)\n assert_array_equal(resi, np.array([[1, 2, 6], [4, 4, 24]]))\n\n def test_nanmean(self):\n self.check(np.nanmean)\n resi = np.nanmean(self.mb, axis=1)\n assert_array_equal(resi.unmasked, np.mean(self.mb, axis=1).unmasked)\n 
assert_array_equal(resi.mask, np.array([False, False]))\n\n def test_nanvar(self):\n self.check(np.nanvar)\n self.check(np.nanvar, ddof=1)\n\n def test_nanstd(self):\n self.check(np.nanstd)\n\n def test_nanmedian(self):\n self.check(np.nanmedian)\n\n def test_nanquantile(self):\n self.check(np.nanquantile, q=0.5)\n\n def test_nanpercentile(self):\n self.check(np.nanpercentile, q=50)\n\n\nuntested_functions = set()\nif NUMPY_LT_1_20:\n financial_functions = {f for f in all_wrapped_functions.values()\n if f in np.lib.financial.__dict__.values()}\n untested_functions |= financial_functions\n\nif NUMPY_LT_1_23:\n deprecated_functions = {\n # Deprecated, removed in numpy 1.23\n np.asscalar, np.alen,\n }\nelse:\n deprecated_functions = set()\n\nuntested_functions |= deprecated_functions\nio_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}\nuntested_functions |= io_functions\n\npoly_functions = {\n np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,\n np.polymul, np.polysub, np.polyval, np.roots, np.vander\n }\nuntested_functions |= poly_functions\n\n\n# Get covered functions\ntested_functions = set()\nfor cov_cls in list(filter(inspect.isclass, locals().values())):\n for k, v in cov_cls.__dict__.items():\n if inspect.isfunction(v) and k.startswith('test'):\n f = k.replace('test_', '')\n if f in all_wrapped_functions:\n tested_functions.add(all_wrapped_functions[f])\n\n\ndef test_basic_testing_completeness():\n assert all_wrapped == (tested_functions\n | IGNORED_FUNCTIONS\n | UNSUPPORTED_FUNCTIONS)\n\n\n@pytest.mark.xfail(reason='coverage not completely set up yet')\ndef test_testing_completeness():\n assert not tested_functions.intersection(untested_functions)\n assert all_wrapped == (tested_functions | untested_functions)\n\n\nclass TestFunctionHelpersCompleteness:\n @pytest.mark.parametrize('one, two', itertools.combinations(\n (MASKED_SAFE_FUNCTIONS,\n UNSUPPORTED_FUNCTIONS,\n set(APPLY_TO_BOTH_FUNCTIONS.keys()),\n 
set(DISPATCHED_FUNCTIONS.keys())), 2))\n def test_no_duplicates(self, one, two):\n assert not one.intersection(two)\n\n def test_all_included(self):\n included_in_helpers = (MASKED_SAFE_FUNCTIONS |\n UNSUPPORTED_FUNCTIONS |\n set(APPLY_TO_BOTH_FUNCTIONS.keys()) |\n set(DISPATCHED_FUNCTIONS.keys()))\n assert all_wrapped == included_in_helpers\n\n @pytest.mark.xfail(reason='coverage not completely set up yet')\n def test_ignored_are_untested(self):\n assert IGNORED_FUNCTIONS == untested_functions\n"}}},{"rowIdx":1378,"cells":{"hash":{"kind":"string","value":"335b0da6a0a90074c2978ec557741b98f4be23ece77fa4625d47e8dafde672dd"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom textwrap import indent\nfrom collections import OrderedDict\n\nfrom .coordinate_helpers import CoordinateHelper\nfrom .frame import RectangularFrame, RectangularFrame1D\nfrom .coordinate_range import find_coordinate_range\n\n\nclass CoordinatesMap:\n \"\"\"\n A container for coordinate helpers that represents a coordinate system.\n\n This object can be used to access coordinate helpers by index (like a list)\n or by name (like a dictionary).\n\n Parameters\n ----------\n axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`\n The axes the coordinate map belongs to.\n transform : `~matplotlib.transforms.Transform`, optional\n The transform for the data.\n coord_meta : dict, optional\n A dictionary providing additional metadata. This should include the keys\n ``type``, ``wrap``, and ``unit``. Each of these should be a list with as\n many items as the dimension of the coordinate system. 
The ``type``\n entries should be one of ``longitude``, ``latitude``, or ``scalar``, the\n ``wrap`` entries should give, for the longitude, the angle at which the\n coordinate wraps (and `None` otherwise), and the ``unit`` should give\n the unit of the coordinates as :class:`~astropy.units.Unit` instances.\n This can optionally also include a ``format_unit`` entry giving the\n units to use for the tick labels (if not specified, this defaults to\n ``unit``).\n frame_class : type, optional\n The class for the frame, which should be a subclass of\n :class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a\n :class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`\n previous_frame_path : `~matplotlib.path.Path`, optional\n When changing the WCS of the axes, the frame instance will change but\n we might want to keep re-using the same underlying matplotlib\n `~matplotlib.path.Path` - in that case, this can be passed to this\n keyword argument.\n \"\"\"\n\n def __init__(self, axes, transform=None, coord_meta=None,\n frame_class=RectangularFrame, previous_frame_path=None):\n\n self._axes = axes\n self._transform = transform\n\n self.frame = frame_class(axes, self._transform, path=previous_frame_path)\n\n # Set up coordinates\n self._coords = []\n self._aliases = {}\n\n visible_count = 0\n\n for index in range(len(coord_meta['type'])):\n\n # Extract coordinate metadata\n coord_type = coord_meta['type'][index]\n coord_wrap = coord_meta['wrap'][index]\n coord_unit = coord_meta['unit'][index]\n name = coord_meta['name'][index]\n\n visible = True\n if 'visible' in coord_meta:\n visible = coord_meta['visible'][index]\n\n format_unit = None\n if 'format_unit' in coord_meta:\n format_unit = coord_meta['format_unit'][index]\n\n default_label = name[0] if isinstance(name, (tuple, list)) else name\n if 'default_axis_label' in coord_meta:\n default_label = coord_meta['default_axis_label'][index]\n\n coord_index = None\n if visible:\n visible_count += 1\n 
coord_index = visible_count - 1\n\n self._coords.append(CoordinateHelper(parent_axes=axes,\n parent_map=self,\n transform=self._transform,\n coord_index=coord_index,\n coord_type=coord_type,\n coord_wrap=coord_wrap,\n coord_unit=coord_unit,\n format_unit=format_unit,\n frame=self.frame,\n default_label=default_label))\n\n # Set up aliases for coordinates\n if isinstance(name, tuple):\n for nm in name:\n nm = nm.lower()\n # Do not replace an alias already in the map if we have\n # more than one alias for this axis.\n if nm not in self._aliases:\n self._aliases[nm] = index\n else:\n self._aliases[name.lower()] = index\n\n def __getitem__(self, item):\n if isinstance(item, str):\n return self._coords[self._aliases[item.lower()]]\n else:\n return self._coords[item]\n\n def __contains__(self, item):\n if isinstance(item, str):\n return item.lower() in self._aliases\n else:\n return 0 <= item < len(self._coords)\n\n def set_visible(self, visibility):\n raise NotImplementedError()\n\n def __iter__(self):\n yield from self._coords\n\n def grid(self, draw_grid=True, grid_type=None, **kwargs):\n \"\"\"\n Plot gridlines for both coordinates.\n\n Standard matplotlib appearance options (color, alpha, etc.) can be\n passed as keyword arguments.\n\n Parameters\n ----------\n draw_grid : bool\n Whether to show the gridlines\n grid_type : { 'lines' | 'contours' }\n Whether to plot the contours by determining the grid lines in\n world coordinates and then plotting them in world coordinates\n (``'lines'``) or by determining the world coordinates at many\n positions in the image and then drawing contours\n (``'contours'``). The first is recommended for 2-d images, while\n for 3-d (or higher dimensional) cubes, the ``'contours'`` option\n is recommended. 
By default, 'lines' is used if the transform has\n an inverse, otherwise 'contours' is used.\n \"\"\"\n for coord in self:\n coord.grid(draw_grid=draw_grid, grid_type=grid_type, **kwargs)\n\n def get_coord_range(self):\n xmin, xmax = self._axes.get_xlim()\n\n if isinstance(self.frame, RectangularFrame1D):\n extent = [xmin, xmax]\n else:\n ymin, ymax = self._axes.get_ylim()\n extent = [xmin, xmax, ymin, ymax]\n\n return find_coordinate_range(self._transform,\n extent,\n [coord.coord_type for coord in self if coord.coord_index is not None],\n [coord.coord_unit for coord in self if coord.coord_index is not None],\n [coord.coord_wrap for coord in self if coord.coord_index is not None])\n\n def _as_table(self):\n\n # Import Table here to avoid importing the astropy.table package\n # every time astropy.visualization.wcsaxes is imported.\n from astropy.table import Table # noqa\n\n rows = []\n for icoord, coord in enumerate(self._coords):\n aliases = [key for key, value in self._aliases.items() if value == icoord]\n row = OrderedDict([('index', icoord), ('aliases', ' '.join(aliases)),\n ('type', coord.coord_type), ('unit', coord.coord_unit),\n ('wrap', coord.coord_wrap), ('format_unit', coord.get_format_unit()),\n ('visible', 'no' if coord.coord_index is None else 'yes')])\n rows.append(row)\n return Table(rows=rows)\n\n def __repr__(self):\n s = f''\n"}}},{"rowIdx":1379,"cells":{"hash":{"kind":"string","value":"0536edb77b3f4ecdafbb784365d46dedab7f90fdc084a3122408636d5fe60d36"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport io\n\nimport pytest\n\nfrom astropy.utils.compat.optional_deps import HAS_PLT\nif HAS_PLT:\n import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom astropy import units as u\nfrom astropy.coordinates import Angle\nfrom astropy.visualization.units import quantity_support\n\n\ndef teardown_function(function):\n plt.close('all')\n\n\n@pytest.mark.skipif('not HAS_PLT')\ndef test_units():\n 
plt.figure()\n\n with quantity_support():\n buff = io.BytesIO()\n\n plt.plot([1, 2, 3] * u.m, [3, 4, 5] * u.kg, label='label')\n plt.plot([105, 210, 315] * u.cm, [3050, 3025, 3010] * u.g)\n plt.legend()\n # Also test fill_between, which requires actual conversion to ndarray\n # with numpy >=1.10 (#4654).\n plt.fill_between([1, 3] * u.m, [3, 5] * u.kg, [3050, 3010] * u.g)\n plt.savefig(buff, format='svg')\n\n assert plt.gca().xaxis.get_units() == u.m\n assert plt.gca().yaxis.get_units() == u.kg\n\n\n@pytest.mark.skipif('not HAS_PLT')\ndef test_units_errbarr():\n pytest.importorskip(\"matplotlib\")\n plt.figure()\n\n with quantity_support():\n x = [1, 2, 3] * u.s\n y = [1, 2, 3] * u.m\n yerr = [3, 2, 1] * u.cm\n\n fig, ax = plt.subplots()\n ax.errorbar(x, y, yerr=yerr)\n\n assert ax.xaxis.get_units() == u.s\n assert ax.yaxis.get_units() == u.m\n\n\n@pytest.mark.skipif('not HAS_PLT')\ndef test_incompatible_units():\n # NOTE: minversion check does not work properly for matplotlib dev.\n try:\n # https://github.com/matplotlib/matplotlib/pull/13005\n from matplotlib.units import ConversionError\n except ImportError:\n err_type = u.UnitConversionError\n else:\n err_type = ConversionError\n\n plt.figure()\n\n with quantity_support():\n plt.plot([1, 2, 3] * u.m)\n with pytest.raises(err_type):\n plt.plot([105, 210, 315] * u.kg)\n\n\n@pytest.mark.skipif('not HAS_PLT')\ndef test_quantity_subclass():\n \"\"\"Check that subclasses are recognized.\n\n This sadly is not done by matplotlib.units itself, though\n there is a PR to change it:\n https://github.com/matplotlib/matplotlib/pull/13536\n \"\"\"\n plt.figure()\n\n with quantity_support():\n plt.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)\n plt.scatter([105, 210, 315] * u.arcsec, [3050, 3025, 3010] * u.g)\n plt.plot(Angle([105, 210, 315], u.arcsec), [3050, 3025, 3010] * u.g)\n\n assert plt.gca().xaxis.get_units() == u.deg\n assert plt.gca().yaxis.get_units() == u.kg\n\n\n@pytest.mark.skipif('not HAS_PLT')\ndef 
test_nested():\n\n with quantity_support():\n\n with quantity_support():\n\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)\n\n assert ax.xaxis.get_units() == u.deg\n assert ax.yaxis.get_units() == u.kg\n\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.scatter(Angle([1, 2, 3], u.arcsec), [3, 4, 5] * u.pc)\n\n assert ax.xaxis.get_units() == u.arcsec\n assert ax.yaxis.get_units() == u.pc\n\n\n@pytest.mark.skipif('not HAS_PLT')\ndef test_empty_hist():\n\n with quantity_support():\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.hist([1, 2, 3, 4] * u.mmag, bins=100)\n # The second call results in an empty list being passed to the\n # unit converter in matplotlib >= 3.1\n ax.hist([] * u.mmag, bins=100)\n\n\n@pytest.mark.skipif('not HAS_PLT')\ndef test_radian_formatter():\n with quantity_support():\n fig, ax = plt.subplots()\n ax.plot([1, 2, 3], [1, 2, 3] * u.rad * np.pi)\n fig.canvas.draw()\n labels = [tl.get_text() for tl in ax.yaxis.get_ticklabels()]\n assert labels == ['π/2', 'π', '3π/2', '2π', '5π/2', '3π', '7π/2']\n"}}},{"rowIdx":1380,"cells":{"hash":{"kind":"string","value":"a3abce9b794f648eeee5ec5299469e0baea030b5101c79828d9958143bbd2425"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\n\npytest.importorskip('matplotlib') # noqa\n\nimport matplotlib.pyplot as plt\nimport matplotlib.dates\nfrom contextlib import nullcontext\nfrom erfa import ErfaWarning\n\nfrom astropy.time import Time\nfrom astropy.visualization.time import time_support\n\n# Matplotlib 3.3 added a settable epoch for plot dates and changed the default\n# from 0000-12-31 to 1970-01-01. 
This can be checked by the existence of\n# get_epoch() in matplotlib.dates.\nMPL_EPOCH_1970 = hasattr(matplotlib.dates, 'get_epoch')\n\n# Since some of the examples below use times/dates in the future, we use the\n# TAI time scale to avoid ERFA warnings about dubious years.\nDEFAULT_SCALE = 'tai'\n\n\ndef get_ticklabels(axis):\n axis.figure.canvas.draw()\n return [x.get_text() for x in axis.get_ticklabels()]\n\n\ndef teardown_function(function):\n plt.close('all')\n\n\n# We first check that we get the expected labels for different time intervals\n# for standard ISO formatting. This is a way to check both the locator and\n# formatter code.\n\nRANGE_CASES = [\n\n # Interval of many years\n (('2014-03-22T12:30:30.9', '2077-03-22T12:30:32.1'),\n ['2020-01-01',\n '2040-01-01',\n '2060-01-01']),\n\n # Interval of a few years\n (('2014-03-22T12:30:30.9', '2017-03-22T12:30:32.1'),\n ['2015-01-01',\n '2016-01-01',\n '2017-01-01']),\n\n # Interval of just under a year\n (('2014-03-22T12:30:30.9', '2015-01-22T12:30:32.1'),\n ['2014-05-01',\n '2014-10-01']),\n\n # Interval of a few months\n (('2014-11-22T12:30:30.9', '2015-02-22T12:30:32.1'),\n ['2014-12-01',\n '2015-01-01',\n '2015-02-01']),\n\n # Interval of just over a month\n (('2014-03-22T12:30:30.9', '2014-04-23T12:30:32.1'),\n ['2014-04-01']),\n\n # Interval of just under a month\n (('2014-03-22T12:30:30.9', '2014-04-21T12:30:32.1'),\n ['2014-03-24',\n '2014-04-03',\n '2014-04-13']),\n\n # Interval of just over an hour\n (('2014-03-22T12:30:30.9', '2014-03-22T13:31:30.9'),\n ['2014-03-22T12:40:00.000',\n '2014-03-22T13:00:00.000',\n '2014-03-22T13:20:00.000']),\n\n # Interval of just under an hour\n (('2014-03-22T12:30:30.9', '2014-03-22T13:28:30.9'),\n ['2014-03-22T12:40:00.000',\n '2014-03-22T13:00:00.000',\n '2014-03-22T13:20:00.000']),\n\n # Interval of a few minutes\n (('2014-03-22T12:30:30.9', '2014-03-22T12:38:30.9'),\n ['2014-03-22T12:33:00.000',\n '2014-03-22T12:36:00.000']),\n\n # Interval of a few seconds\n 
(('2014-03-22T12:30:30.9', '2014-03-22T12:30:40.9'),\n ['2014-03-22T12:30:33.000',\n '2014-03-22T12:30:36.000',\n '2014-03-22T12:30:39.000']),\n\n # Interval of a couple of seconds\n (('2014-03-22T12:30:30.9', '2014-03-22T12:30:32.1'),\n ['2014-03-22T12:30:31.000',\n '2014-03-22T12:30:31.500',\n '2014-03-22T12:30:32.000']),\n\n # Interval of under a second\n (('2014-03-22T12:30:30.89', '2014-03-22T12:30:31.19'),\n ['2014-03-22T12:30:30.900',\n '2014-03-22T12:30:31.000',\n '2014-03-22T12:30:31.100']),\n\n]\n\n\n@pytest.mark.parametrize(('interval', 'expected'), RANGE_CASES)\ndef test_formatter_locator(interval, expected):\n\n # Check that the ticks and labels returned for the above cases are correct.\n\n with time_support():\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.set_xlim(Time(interval[0], scale=DEFAULT_SCALE),\n Time(interval[1], scale=DEFAULT_SCALE))\n assert get_ticklabels(ax.xaxis) == expected\n\n\nFORMAT_CASES = [\n ('byear', ['2020', '2040', '2060']),\n ('byear_str', ['B2020.000', 'B2040.000', 'B2060.000']),\n ('cxcsec', ['1000000000', '1500000000', '2000000000', '2500000000']),\n ('decimalyear', ['2020', '2040', '2060']),\n ('fits', ['2020-01-01T00:00:00.000', '2040-01-01T00:00:00.000', '2060-01-01T00:00:00.000']),\n ('gps', ['1500000000', '2000000000', '2500000000', '3000000000']),\n ('iso', ['2020-01-01 00:00:00.000', '2040-01-01 00:00:00.000', '2060-01-01 00:00:00.000']),\n ('isot', ['2020-01-01T00:00:00.000', '2040-01-01T00:00:00.000', '2060-01-01T00:00:00.000']),\n ('jd', ['2458000', '2464000', '2470000', '2476000']),\n ('jyear', ['2020', '2040', '2060']),\n ('jyear_str', ['J2020.000', 'J2040.000', 'J2060.000']),\n ('mjd', ['60000', '66000', '72000', '78000']),\n ('plot_date', (['18000', '24000', '30000', '36000'] if MPL_EPOCH_1970 else\n ['738000', '744000', '750000', '756000'])),\n ('unix', ['1500000000', '2000000000', '2500000000', '3000000000']),\n ('yday', ['2020:001:00:00:00.000', '2040:001:00:00:00.000', 
'2060:001:00:00:00.000']),\n]\n\n\n@pytest.mark.parametrize(('format', 'expected'), FORMAT_CASES)\ndef test_formats(format, expected):\n # Check that the locators/formatters work fine for all time formats\n with time_support(format=format, simplify=False):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n # Getting unix time and plot_date requires going through a scale for\n # which ERFA emits a warning about the date being dubious\n with pytest.warns(ErfaWarning) if format in ['unix', 'plot_date'] else nullcontext():\n ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),\n Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))\n assert get_ticklabels(ax.xaxis) == expected\n ax.get_xlabel() == f'Time ({format})'\n\n\n@pytest.mark.parametrize(('format', 'expected'), FORMAT_CASES)\ndef test_auto_formats(format, expected):\n # Check that the format/scale is taken from the first time used.\n with time_support(simplify=False):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n # Getting unix time and plot_date requires going through a scale for\n # which ERFA emits a warning about the date being dubious\n with pytest.warns(ErfaWarning) if format in ['unix', 'plot_date'] else nullcontext():\n ax.set_xlim(Time(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE), format=format),\n Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))\n assert get_ticklabels(ax.xaxis) == expected\n ax.get_xlabel() == f'Time ({format})'\n\n\nFORMAT_CASES_SIMPLIFY = [\n ('fits', ['2020-01-01', '2040-01-01', '2060-01-01']),\n ('iso', ['2020-01-01', '2040-01-01', '2060-01-01']),\n ('isot', ['2020-01-01', '2040-01-01', '2060-01-01']),\n ('yday', ['2020', '2040', '2060']),\n]\n\n\n@pytest.mark.parametrize(('format', 'expected'), FORMAT_CASES_SIMPLIFY)\ndef test_formats_simplify(format, expected):\n # Check the use of the simplify= option\n with time_support(format=format, simplify=True):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.set_xlim(Time('2014-03-22T12:30:30.9', 
scale=DEFAULT_SCALE),\n Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))\n assert get_ticklabels(ax.xaxis) == expected\n\n\ndef test_plot():\n # Make sure that plot() works properly\n with time_support():\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),\n Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))\n ax.plot(Time(['2015-03-22T12:30:30.9',\n '2018-03-22T12:30:30.9',\n '2021-03-22T12:30:30.9'], scale=DEFAULT_SCALE))\n\n\ndef test_nested():\n\n with time_support(format='iso', simplify=False):\n\n with time_support(format='yday', simplify=True):\n\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),\n Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))\n assert get_ticklabels(ax.xaxis) == ['2020', '2040', '2060']\n\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.set_xlim(Time('2014-03-22T12:30:30.9', scale=DEFAULT_SCALE),\n Time('2077-03-22T12:30:32.1', scale=DEFAULT_SCALE))\n assert get_ticklabels(ax.xaxis) == ['2020-01-01 00:00:00.000',\n '2040-01-01 00:00:00.000',\n '2060-01-01 00:00:00.000']\n"}}},{"rowIdx":1381,"cells":{"hash":{"kind":"string","value":"7ba5c68c2796901587d84802294b95cf022533bf9876fe6e02d4e9970e22a072"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport warnings\n\nfrom packaging.version import Version\nimport pytest\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.contour import QuadContourSet\n\nfrom astropy import units as u\nfrom astropy.wcs import WCS\nfrom astropy.io import fits\nfrom astropy.coordinates import SkyCoord\n\nfrom astropy.utils.data import get_pkg_data_filename\nfrom astropy.wcs.wcsapi import SlicedLowLevelWCS, HighLevelWCSWrapper\n\nfrom astropy.visualization.wcsaxes.core import WCSAxes\nfrom astropy.visualization.wcsaxes.frame import (\n EllipticalFrame, RectangularFrame, 
RectangularFrame1D)\nfrom astropy.visualization.wcsaxes.utils import get_coord_meta\nfrom astropy.visualization.wcsaxes.transforms import CurvedTransform\n\nft_version = Version(matplotlib.ft2font.__freetype_version__)\nFREETYPE_261 = ft_version == Version(\"2.6.1\")\n\n# We cannot use matplotlib.checkdep_usetex() anymore, see\n# https://github.com/matplotlib/matplotlib/issues/23244\nTEX_UNAVAILABLE = True\n\nMATPLOTLIB_DEV = Version(matplotlib.__version__).is_devrelease\n\n\ndef teardown_function(function):\n plt.close('all')\n\n\ndef test_grid_regression(ignore_matplotlibrc):\n # Regression test for a bug that meant that if the rc parameter\n # axes.grid was set to True, WCSAxes would crash upon initialization.\n plt.rc('axes', grid=True)\n fig = plt.figure(figsize=(3, 3))\n WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])\n\n\ndef test_format_coord_regression(ignore_matplotlibrc, tmpdir):\n # Regression test for a bug that meant that if format_coord was called by\n # Matplotlib before the axes were drawn, an error occurred.\n fig = plt.figure(figsize=(3, 3))\n ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])\n fig.add_axes(ax)\n assert ax.format_coord(10, 10) == \"\"\n assert ax.coords[0].format_coord(10) == \"\"\n assert ax.coords[1].format_coord(10) == \"\"\n fig.savefig(tmpdir.join('nothing').strpath)\n assert ax.format_coord(10, 10) == \"10.0 10.0 (world)\"\n assert ax.coords[0].format_coord(10) == \"10.0\"\n assert ax.coords[1].format_coord(10) == \"10.0\"\n\n\nTARGET_HEADER = fits.Header.fromstring(\"\"\"\nNAXIS = 2\nNAXIS1 = 200\nNAXIS2 = 100\nCTYPE1 = 'RA---MOL'\nCRPIX1 = 500\nCRVAL1 = 180.0\nCDELT1 = -0.4\nCUNIT1 = 'deg '\nCTYPE2 = 'DEC--MOL'\nCRPIX2 = 400\nCRVAL2 = 0.0\nCDELT2 = 0.4\nCUNIT2 = 'deg '\nCOORDSYS= 'icrs '\n\"\"\", sep='\\n')\n\n\n@pytest.mark.parametrize('grid_type', ['lines', 'contours'])\ndef test_no_numpy_warnings(ignore_matplotlibrc, tmpdir, grid_type):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1, projection=WCS(TARGET_HEADER))\n 
ax.imshow(np.zeros((100, 200)))\n ax.coords.grid(color='white', grid_type=grid_type)\n\n # There should be no warnings raised if some pixels are outside WCS\n # (since this is normal).\n # BUT our own catch_warning was ignoring some warnings before, so now we\n # have to catch it. Otherwise, the pytest filterwarnings=error\n # setting in setup.cfg will fail this test.\n # There are actually multiple warnings but they are all similar.\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', message=r'.*converting a masked element to nan.*')\n warnings.filterwarnings('ignore', message=r'.*No contour levels were found within the data range.*')\n warnings.filterwarnings('ignore', message=r'.*np\\.asscalar\\(a\\) is deprecated since NumPy v1\\.16.*')\n warnings.filterwarnings('ignore', message=r'.*PY_SSIZE_T_CLEAN will be required.*')\n fig.savefig(tmpdir.join('test.png').strpath)\n\n\ndef test_invalid_frame_overlay(ignore_matplotlibrc):\n\n # Make sure a nice error is returned if a frame doesn't exist\n ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))\n with pytest.raises(ValueError) as exc:\n ax.get_coords_overlay('banana')\n assert exc.value.args[0] == 'Frame banana not found'\n\n with pytest.raises(ValueError) as exc:\n get_coord_meta('banana')\n assert exc.value.args[0] == 'Unknown frame: banana'\n\n\ndef test_plot_coord_transform(ignore_matplotlibrc):\n\n twoMASS_k_header = get_pkg_data_filename('data/2MASS_k_header')\n twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],\n projection=WCS(twoMASS_k_header),\n aspect='equal')\n ax.set_xlim(-0.5, 720.5)\n ax.set_ylim(-0.5, 720.5)\n\n c = SkyCoord(359.76045223*u.deg, 0.26876217*u.deg)\n with pytest.raises(TypeError):\n ax.plot_coord(c, 'o', transform=ax.get_transform('galactic'))\n\n\ndef test_set_label_properties(ignore_matplotlibrc):\n\n # Regression test to make sure that arguments passed to\n # 
set_xlabel/set_ylabel are passed to the underlying coordinate helpers\n\n ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))\n\n ax.set_xlabel('Test x label', labelpad=2, color='red')\n ax.set_ylabel('Test y label', labelpad=3, color='green')\n\n assert ax.coords[0].axislabels.get_text() == 'Test x label'\n assert ax.coords[0].axislabels.get_minpad('b') == 2\n assert ax.coords[0].axislabels.get_color() == 'red'\n\n assert ax.coords[1].axislabels.get_text() == 'Test y label'\n assert ax.coords[1].axislabels.get_minpad('l') == 3\n assert ax.coords[1].axislabels.get_color() == 'green'\n\n assert ax.get_xlabel() == 'Test x label'\n assert ax.get_ylabel() == 'Test y label'\n\n\nGAL_HEADER = fits.Header.fromstring(\"\"\"\nSIMPLE = T / conforms to FITS standard\nBITPIX = -32 / array data type\nNAXIS = 3 / number of array dimensions\nNAXIS1 = 31\nNAXIS2 = 2881\nNAXIS3 = 480\nEXTEND = T\nCTYPE1 = 'DISTMOD '\nCRVAL1 = 3.5\nCDELT1 = 0.5\nCRPIX1 = 1.0\nCTYPE2 = 'GLON-CAR'\nCRVAL2 = 180.0\nCDELT2 = -0.125\nCRPIX2 = 1.0\nCTYPE3 = 'GLAT-CAR'\nCRVAL3 = 0.0\nCDELT3 = 0.125\nCRPIX3 = 241.0\n\"\"\", sep='\\n')\n\n\ndef test_slicing_warnings(ignore_matplotlibrc, tmpdir):\n\n # Regression test to make sure that no warnings are emitted by the tick\n # locator for the sliced axis when slicing a cube.\n\n # Scalar case\n\n wcs3d = WCS(naxis=3)\n wcs3d.wcs.ctype = ['x', 'y', 'z']\n wcs3d.wcs.cunit = ['deg', 'deg', 'km/s']\n wcs3d.wcs.crpix = [614.5, 856.5, 333]\n wcs3d.wcs.cdelt = [6.25, 6.25, 23]\n wcs3d.wcs.crval = [0., 0., 1.]\n\n with warnings.catch_warnings():\n # https://github.com/astropy/astropy/issues/9690\n warnings.filterwarnings('ignore', message=r'.*PY_SSIZE_T_CLEAN.*')\n plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))\n plt.savefig(tmpdir.join('test.png').strpath)\n\n # Angle case\n\n wcs3d = WCS(GAL_HEADER)\n\n with warnings.catch_warnings():\n # https://github.com/astropy/astropy/issues/9690\n warnings.filterwarnings('ignore', 
message=r'.*PY_SSIZE_T_CLEAN.*')\n plt.clf()\n plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 2))\n plt.savefig(tmpdir.join('test.png').strpath)\n\n\ndef test_plt_xlabel_ylabel(tmpdir):\n\n # Regression test for a bug that happened when using plt.xlabel\n # and plt.ylabel with Matplotlib 3.0\n\n plt.subplot(projection=WCS())\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.savefig(tmpdir.join('test.png').strpath)\n\n\ndef test_grid_type_contours_transform(tmpdir):\n\n # Regression test for a bug that caused grid_type='contours' to not work\n # with custom transforms\n\n class CustomTransform(CurvedTransform):\n\n # We deliberately don't define the inverse, and has_inverse should\n # default to False.\n\n def transform(self, values):\n return values * 1.3\n\n transform = CustomTransform()\n coord_meta = {'type': ('scalar', 'scalar'),\n 'unit': (u.m, u.s),\n 'wrap': (None, None),\n 'name': ('x', 'y')}\n\n fig = plt.figure()\n ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8],\n transform=transform, coord_meta=coord_meta)\n fig.add_axes(ax)\n ax.grid(grid_type='contours')\n fig.savefig(tmpdir.join('test.png').strpath)\n\n\ndef test_plt_imshow_origin():\n\n # Regression test for a bug that caused origin to be set to upper when\n # plt.imshow was called.\n\n ax = plt.subplot(projection=WCS())\n plt.imshow(np.ones((2, 2)))\n assert ax.get_xlim() == (-0.5, 1.5)\n assert ax.get_ylim() == (-0.5, 1.5)\n\n\ndef test_ax_imshow_origin():\n\n # Regression test for a bug that caused origin to be set to upper when\n # ax.imshow was called with no origin\n\n ax = plt.subplot(projection=WCS())\n ax.imshow(np.ones((2, 2)))\n assert ax.get_xlim() == (-0.5, 1.5)\n assert ax.get_ylim() == (-0.5, 1.5)\n\n\ndef test_grid_contour_large_spacing(tmpdir):\n\n # Regression test for a bug that caused a crash when grid was called and\n # didn't produce grid lines (due e.g. 
to too large spacing) and was then\n # called again.\n\n filename = tmpdir.join('test.png').strpath\n\n ax = plt.subplot(projection=WCS())\n ax.set_xlim(-0.5, 1.5)\n ax.set_ylim(-0.5, 1.5)\n ax.coords[0].set_ticks(values=[] * u.one)\n\n ax.coords[0].grid(grid_type='contours')\n plt.savefig(filename)\n\n ax.coords[0].grid(grid_type='contours')\n plt.savefig(filename)\n\n\ndef test_contour_return():\n\n # Regression test for a bug that caused contour and contourf to return None\n # instead of the contour object.\n\n fig = plt.figure()\n ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])\n fig.add_axes(ax)\n\n cset = ax.contour(np.arange(16).reshape(4, 4), transform=ax.get_transform('world'))\n assert isinstance(cset, QuadContourSet)\n\n cset = ax.contourf(np.arange(16).reshape(4, 4), transform=ax.get_transform('world'))\n assert isinstance(cset, QuadContourSet)\n\n\ndef test_contour_empty():\n\n # Regression test for a bug that caused contour to crash if no contours\n # were present.\n\n fig = plt.figure()\n ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])\n fig.add_axes(ax)\n with pytest.warns(UserWarning, match='No contour levels were found within the data range'):\n ax.contour(np.zeros((4, 4)), transform=ax.get_transform('world'))\n\n\ndef test_iterate_coords(ignore_matplotlibrc, tmpdir):\n\n # Regression test for a bug that caused ax.coords to return too few axes\n\n wcs3d = WCS(naxis=3)\n wcs3d.wcs.ctype = ['x', 'y', 'z']\n wcs3d.wcs.cunit = ['deg', 'deg', 'km/s']\n wcs3d.wcs.crpix = [614.5, 856.5, 333]\n wcs3d.wcs.cdelt = [6.25, 6.25, 23]\n wcs3d.wcs.crval = [0., 0., 1.]\n\n ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))\n\n x, y, z = ax.coords\n\n\ndef test_invalid_slices_errors(ignore_matplotlibrc):\n\n # Make sure that users get a clear message when specifying a WCS with\n # >2 dimensions without giving the 'slices' argument, or if the 'slices'\n # argument has too many/few elements.\n\n wcs3d = WCS(naxis=3)\n wcs3d.wcs.ctype = ['x', 'y', 'z']\n\n 
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))\n\n with pytest.raises(ValueError) as exc:\n plt.subplot(1, 1, 1, projection=wcs3d)\n assert exc.value.args[0] == (\"WCS has more than 2 pixel dimensions, so \"\n \"'slices' should be set\")\n\n with pytest.raises(ValueError) as exc:\n plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1, 2))\n assert exc.value.args[0] == (\"'slices' should have as many elements as \"\n \"WCS has pixel dimensions (should be 3)\")\n\n wcs2d = WCS(naxis=2)\n wcs2d.wcs.ctype = ['x', 'y']\n\n plt.clf()\n ax = plt.subplot(1, 1, 1, projection=wcs2d)\n assert ax.frame_class is RectangularFrame\n plt.clf()\n ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=('x', 'y'))\n assert ax.frame_class is RectangularFrame\n plt.clf()\n ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=('y', 'x'))\n assert ax.frame_class is RectangularFrame\n plt.clf()\n ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=['x', 'y'])\n assert ax.frame_class is RectangularFrame\n plt.clf()\n ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, 'x'))\n assert ax.frame_class is RectangularFrame1D\n\n wcs1d = WCS(naxis=1)\n wcs1d.wcs.ctype = ['x']\n\n plt.clf()\n ax = plt.subplot(1, 1, 1, projection=wcs1d)\n assert ax.frame_class is RectangularFrame1D\n\n with pytest.raises(ValueError):\n plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, 'y'))\n\n\nEXPECTED_REPR_1 = \"\"\"\n\n \"\"\".strip()\n\nEXPECTED_REPR_2 = \"\"\"\n\n \"\"\".strip()\n\n\ndef test_repr(ignore_matplotlibrc):\n\n # Unit test to make sure __repr__ looks as expected\n\n wcs3d = WCS(GAL_HEADER)\n\n # Cube header has world coordinates as distance, lon, lat, so start off\n # by slicing in a way that we select just lon,lat:\n\n ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=(1, 'x', 'y'))\n assert repr(ax.coords) == EXPECTED_REPR_1\n\n # Now slice in a way that all world coordinates are still present:\n\n plt.clf()\n ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))\n 
assert repr(ax.coords) == EXPECTED_REPR_2\n\n\n@pytest.fixture\ndef time_spectral_wcs_2d():\n wcs = WCS(naxis=2)\n wcs.wcs.ctype = ['FREQ', 'TIME']\n wcs.wcs.set()\n return wcs\n\n\ndef test_time_wcs(time_spectral_wcs_2d):\n\n # Regression test for a bug that caused WCSAxes to error when using a WCS\n # with a time axis.\n\n plt.subplot(projection=time_spectral_wcs_2d)\n\n\n@pytest.mark.skipif('TEX_UNAVAILABLE')\ndef test_simplify_labels_usetex(ignore_matplotlibrc, tmpdir):\n \"\"\"Regression test for https://github.com/astropy/astropy/issues/8004.\"\"\"\n plt.rc('text', usetex=True)\n\n header = {\n 'NAXIS': 2,\n 'NAXIS1': 360,\n 'NAXIS2': 180,\n 'CRPIX1': 180.5,\n 'CRPIX2': 90.5,\n 'CRVAL1': 180.0,\n 'CRVAL2': 0.0,\n 'CDELT1': -2 * np.sqrt(2) / np.pi,\n 'CDELT2': 2 * np.sqrt(2) / np.pi,\n 'CTYPE1': 'RA---MOL',\n 'CTYPE2': 'DEC--MOL',\n 'RADESYS': 'ICRS'}\n\n wcs = WCS(header)\n fig, ax = plt.subplots(\n subplot_kw=dict(frame_class=EllipticalFrame, projection=wcs))\n ax.set_xlim(-0.5, header['NAXIS1'] - 0.5)\n ax.set_ylim(-0.5, header['NAXIS2'] - 0.5)\n ax.coords[0].set_ticklabel(exclude_overlapping=True)\n ax.coords[1].set_ticklabel(exclude_overlapping=True)\n ax.coords[0].set_ticks(spacing=45 * u.deg)\n ax.coords[1].set_ticks(spacing=30 * u.deg)\n ax.grid()\n\n fig.savefig(tmpdir / 'plot.png')\n\n\n@pytest.mark.parametrize('frame_class', [RectangularFrame, EllipticalFrame])\ndef test_set_labels_with_coords(ignore_matplotlibrc, frame_class):\n \"\"\"Test if ``axis.set_xlabel()`` calls the correct ``coords[i]_set_axislabel()`` in a\n WCS plot. 
Regression test for https://github.com/astropy/astropy/issues/10435.\n \"\"\"\n\n labels = ['RA', 'Declination']\n header = {\n 'NAXIS': 2,\n 'NAXIS1': 360,\n 'NAXIS2': 180,\n 'CRPIX1': 180.5,\n 'CRPIX2': 90.5,\n 'CRVAL1': 180.0,\n 'CRVAL2': 0.0,\n 'CDELT1': -2 * np.sqrt(2) / np.pi,\n 'CDELT2': 2 * np.sqrt(2) / np.pi,\n 'CTYPE1': 'RA---AIT',\n 'CTYPE2': 'DEC--AIT'}\n\n wcs = WCS(header)\n fig, ax = plt.subplots(\n subplot_kw=dict(frame_class=frame_class, projection=wcs))\n ax.set_xlabel(labels[0])\n ax.set_ylabel(labels[1])\n\n assert ax.get_xlabel() == labels[0]\n assert ax.get_ylabel() == labels[1]\n for i in range(2):\n assert ax.coords[i].get_axislabel() == labels[i]\n\n\n@pytest.mark.parametrize('atol', [0.2, 1.0e-8])\ndef test_bbox_size(atol):\n # Test for the size of a WCSAxes bbox (only have Matplotlib >= 3.0 now)\n extents = [11.38888888888889, 3.5, 576.0, 432.0]\n\n fig = plt.figure()\n ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])\n fig.add_axes(ax)\n fig.canvas.draw()\n renderer = fig.canvas.renderer\n ax_bbox = ax.get_tightbbox(renderer)\n\n # Enforce strict test only with reference Freetype version\n if atol < 0.1 and not FREETYPE_261:\n pytest.xfail(\"Exact BoundingBox dimensions are only ensured with FreeType 2.6.1\")\n assert np.allclose(ax_bbox.extents, extents, atol=atol)\n\n\ndef test_wcs_type_transform_regression():\n wcs = WCS(TARGET_HEADER)\n sliced_wcs = SlicedLowLevelWCS(wcs, np.s_[1:-1, 1:-1])\n ax = plt.subplot(1, 1, 1, projection=wcs)\n ax.get_transform(sliced_wcs)\n\n high_wcs = HighLevelWCSWrapper(sliced_wcs)\n ax.get_transform(sliced_wcs)\n\n\ndef test_multiple_draws_grid_contours(tmpdir):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1, projection=WCS())\n ax.grid(color='black', grid_type='contours')\n fig.savefig(tmpdir / 'plot.png')\n fig.savefig(tmpdir / 
'plot.png')\n"}}},{"rowIdx":1382,"cells":{"hash":{"kind":"string","value":"402837800849df233960829ea11da410e632056ecdf618ee2f45597cb32ca99e"},"content":{"kind":"string","value":"from astropy.nddata import NDData, NDIOMixin, NDDataRef\n\n\n# Alias NDDataAllMixins in case this will be renamed ... :-)\nNDDataIO = NDDataRef\n\n\ndef test_simple_write_read(tmpdir):\n ndd = NDDataIO([1, 2, 3])\n assert hasattr(ndd, 'read')\n assert hasattr(ndd, 'write')\n"}}},{"rowIdx":1383,"cells":{"hash":{"kind":"string","value":"65ea7ca6cfb409109e804cd19e8678b7a6fdeb0714e03bf5d2bc30c73fc8dd4e"},"content":{"kind":"string","value":"import os\nimport abc\n\nimport numpy as np\n\n__all__ = ['BaseLowLevelWCS', 'validate_physical_types']\n\n\nclass BaseLowLevelWCS(metaclass=abc.ABCMeta):\n \"\"\"\n Abstract base class for the low-level WCS interface.\n\n This is described in `APE 14: A shared Python interface for World Coordinate\n Systems `_.\n \"\"\"\n\n @property\n @abc.abstractmethod\n def pixel_n_dim(self):\n \"\"\"\n The number of axes in the pixel coordinate system.\n \"\"\"\n\n @property\n @abc.abstractmethod\n def world_n_dim(self):\n \"\"\"\n The number of axes in the world coordinate system.\n \"\"\"\n\n @property\n @abc.abstractmethod\n def world_axis_physical_types(self):\n \"\"\"\n An iterable of strings describing the physical type for each world axis.\n\n These should be names from the VO UCD1+ controlled Vocabulary\n (http://www.ivoa.net/documents/latest/UCDlist.html). If no matching UCD\n type exists, this can instead be ``\"custom:xxx\"``, where ``xxx`` is an\n arbitrary string. 
Alternatively, if the physical type is\n unknown/undefined, an element can be `None`.\n \"\"\"\n\n @property\n @abc.abstractmethod\n def world_axis_units(self):\n \"\"\"\n An iterable of strings given the units of the world coordinates for each\n axis.\n\n The strings should follow the `IVOA VOUnit standard\n `_ (though as noted in the VOUnit\n specification document, units that do not follow this standard are still\n allowed, but just not recommended).\n \"\"\"\n\n @abc.abstractmethod\n def pixel_to_world_values(self, *pixel_arrays):\n \"\"\"\n Convert pixel coordinates to world coordinates.\n\n This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays as\n input, and pixel coordinates should be zero-based. Returns\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays in units given by\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Note that pixel coordinates are\n assumed to be 0 at the center of the first pixel in each dimension. If a\n pixel is in a region where the WCS is not defined, NaN can be returned.\n The coordinates should be specified in the ``(x, y)`` order, where for\n an image, ``x`` is the horizontal coordinate and ``y`` is the vertical\n coordinate.\n\n If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this\n method returns a single scalar or array, otherwise a tuple of scalars or\n arrays is returned.\n \"\"\"\n\n def array_index_to_world_values(self, *index_arrays):\n \"\"\"\n Convert array indices to world coordinates.\n\n This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` except that\n the indices should be given in ``(i, j)`` order, where for an image\n ``i`` is the row and ``j`` is the column (i.e. 
the opposite order to\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`).\n\n If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this\n method returns a single scalar or array, otherwise a tuple of scalars or\n arrays is returned.\n \"\"\"\n return self.pixel_to_world_values(*index_arrays[::-1])\n\n @abc.abstractmethod\n def world_to_pixel_values(self, *world_arrays):\n \"\"\"\n Convert world coordinates to pixel coordinates.\n\n This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays as\n input in units given by `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Returns\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays. Note that pixel\n coordinates are assumed to be 0 at the center of the first pixel in each\n dimension. If a world coordinate does not have a matching pixel\n coordinate, NaN can be returned. The coordinates should be returned in\n the ``(x, y)`` order, where for an image, ``x`` is the horizontal\n coordinate and ``y`` is the vertical coordinate.\n\n If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this\n method returns a single scalar or array, otherwise a tuple of scalars or\n arrays is returned.\n \"\"\"\n\n def world_to_array_index_values(self, *world_arrays):\n \"\"\"\n Convert world coordinates to array indices.\n\n This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` except that\n the indices should be returned in ``(i, j)`` order, where for an image\n ``i`` is the row and ``j`` is the column (i.e. the opposite order to\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`). 
The indices should be\n returned as rounded integers.\n\n If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this\n method returns a single scalar or array, otherwise a tuple of scalars or\n arrays is returned.\n \"\"\"\n pixel_arrays = self.world_to_pixel_values(*world_arrays)\n if self.pixel_n_dim == 1:\n pixel_arrays = (pixel_arrays,)\n else:\n pixel_arrays = pixel_arrays[::-1]\n array_indices = tuple(np.asarray(np.floor(pixel + 0.5), dtype=np.int_) for pixel in pixel_arrays)\n return array_indices[0] if self.pixel_n_dim == 1 else array_indices\n\n @property\n @abc.abstractmethod\n def world_axis_object_components(self):\n \"\"\"\n A list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` elements giving information\n on constructing high-level objects for the world coordinates.\n\n Each element of the list is a tuple with three items:\n\n * The first is a name for the world object this world array\n corresponds to, which *must* match the string names used in\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`. Note that names might\n appear twice because two world arrays might correspond to a single\n world object (e.g. a celestial coordinate might have both “ra” and\n “dec” arrays, which correspond to a single sky coordinate object).\n\n * The second element is either a string keyword argument name or a\n positional index for the corresponding class from\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`.\n\n * The third argument is a string giving the name of the property\n to access on the corresponding class from\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes` in\n order to get numerical values. 
Alternatively, this argument can be a\n callable Python object that takes a high-level coordinate object and\n returns the numerical values suitable for passing to the low-level\n WCS transformation methods.\n\n See the document\n `APE 14: A shared Python interface for World Coordinate Systems\n `_ for examples.\n \"\"\"\n\n @property\n @abc.abstractmethod\n def world_axis_object_classes(self):\n \"\"\"\n A dictionary giving information on constructing high-level objects for\n the world coordinates.\n\n Each key of the dictionary is a string key from\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components`, and each value is a\n tuple with three elements or four elements:\n\n * The first element of the tuple must be a class or a string specifying\n the fully-qualified name of a class, which will specify the actual\n Python object to be created.\n\n * The second element, should be a tuple specifying the positional\n arguments required to initialize the class. If\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components` specifies that the\n world coordinates should be passed as a positional argument, this this\n tuple should include `None` placeholders for the world coordinates.\n\n * The third tuple element must be a dictionary with the keyword\n arguments required to initialize the class.\n\n * Optionally, for advanced use cases, the fourth element (if present)\n should be a callable Python object that gets called instead of the\n class and gets passed the positional and keyword arguments. It should\n return an object of the type of the first element in the tuple.\n\n Note that we don't require the classes to be Astropy classes since there\n is no guarantee that Astropy will have all the classes to represent all\n kinds of world coordinates. 
Furthermore, we recommend that the output be\n kept as human-readable as possible.\n\n The classes used here should have the ability to do conversions by\n passing an instance as the first argument to the same class with\n different arguments (e.g. ``Time(Time(...), scale='tai')``). This is\n a requirement for the implementation of the high-level interface.\n\n The second and third tuple elements for each value of this dictionary\n can in turn contain either instances of classes, or if necessary can\n contain serialized versions that should take the same form as the main\n classes described above (a tuple with three elements with the fully\n qualified name of the class, then the positional arguments and the\n keyword arguments). For low-level API objects implemented in Python, we\n recommend simply returning the actual objects (not the serialized form)\n for optimal performance. Implementations should either always or never\n use serialized classes to represent Python objects, and should indicate\n which of these they follow using the\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.serialized_classes` attribute.\n\n See the document\n `APE 14: A shared Python interface for World Coordinate Systems\n `_ for examples .\n \"\"\"\n\n # The following three properties have default fallback implementations, so\n # they are not abstract.\n\n @property\n def array_shape(self):\n \"\"\"\n The shape of the data that the WCS applies to as a tuple of length\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(row, column)``\n order (the convention for arrays in Python).\n\n If the WCS is valid in the context of a dataset with a particular\n shape, then this property can be used to store the shape of the\n data. This can be used for example if implementing slicing of WCS\n objects. 
This is an optional property, and it should return `None`\n if a shape is not known or relevant.\n \"\"\"\n if self.pixel_shape is None:\n return None\n else:\n return self.pixel_shape[::-1]\n\n @property\n def pixel_shape(self):\n \"\"\"\n The shape of the data that the WCS applies to as a tuple of length\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(x, y)``\n order (where for an image, ``x`` is the horizontal coordinate and ``y``\n is the vertical coordinate).\n\n If the WCS is valid in the context of a dataset with a particular\n shape, then this property can be used to store the shape of the\n data. This can be used for example if implementing slicing of WCS\n objects. This is an optional property, and it should return `None`\n if a shape is not known or relevant.\n\n If you are interested in getting a shape that is comparable to that of\n a Numpy array, you should use\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape` instead.\n \"\"\"\n return None\n\n @property\n def pixel_bounds(self):\n \"\"\"\n The bounds (in pixel coordinates) inside which the WCS is defined,\n as a list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`\n ``(min, max)`` tuples.\n\n The bounds should be given in ``[(xmin, xmax), (ymin, ymax)]``\n order. WCS solutions are sometimes only guaranteed to be accurate\n within a certain range of pixel values, for example when defining a\n WCS that includes fitted distortions. This is an optional property,\n and it should return `None` if a shape is not known or relevant.\n \"\"\"\n return None\n\n @property\n def pixel_axis_names(self):\n \"\"\"\n An iterable of strings describing the name for each pixel axis.\n\n If an axis does not have a name, an empty string should be returned\n (this is the default behavior for all axes if a subclass does not\n override this property). 
Note that these names are just for display\n purposes and are not standardized.\n \"\"\"\n return [''] * self.pixel_n_dim\n\n @property\n def world_axis_names(self):\n \"\"\"\n An iterable of strings describing the name for each world axis.\n\n If an axis does not have a name, an empty string should be returned\n (this is the default behavior for all axes if a subclass does not\n override this property). Note that these names are just for display\n purposes and are not standardized. For standardized axis types, see\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_physical_types`.\n \"\"\"\n return [''] * self.world_n_dim\n\n @property\n def axis_correlation_matrix(self):\n \"\"\"\n Returns an (`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`,\n `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`) matrix that\n indicates using booleans whether a given world coordinate depends on a\n given pixel coordinate.\n\n This defaults to a matrix where all elements are `True` in the absence\n of any further information. For completely independent axes, the\n diagonal would be `True` and all other entries `False`.\n \"\"\"\n return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)\n\n @property\n def serialized_classes(self):\n \"\"\"\n Indicates whether Python objects are given in serialized form or as\n actual Python objects.\n \"\"\"\n return False\n\n def _as_mpl_axes(self):\n \"\"\"\n Compatibility hook for Matplotlib and WCSAxes. 
With this method, one can\n do::\n\n from astropy.wcs import WCS\n import matplotlib.pyplot as plt\n wcs = WCS('filename.fits')\n fig = plt.figure()\n ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=wcs)\n ...\n\n and this will generate a plot with the correct WCS coordinates on the\n axes.\n \"\"\"\n from astropy.visualization.wcsaxes import WCSAxes\n return WCSAxes, {'wcs': self}\n\n\nUCDS_FILE = os.path.join(os.path.dirname(__file__), 'data', 'ucds.txt')\nwith open(UCDS_FILE) as f:\n VALID_UCDS = {x.strip() for x in f.read().splitlines()[1:]}\n\n\ndef validate_physical_types(physical_types):\n \"\"\"\n Validate a list of physical types against the UCD1+ standard\n \"\"\"\n for physical_type in physical_types:\n if (physical_type is not None and\n physical_type not in VALID_UCDS and\n not physical_type.startswith('custom:')):\n raise ValueError(\n f\"'{physical_type}' is not a valid IOVA UCD1+ physical type. \"\n \"It must be a string specified in the list (http://www.ivoa.net/documents/latest/UCDlist.html) \"\n \"or if no matching type exists it can be any string prepended with 'custom:'.\"\n )\n"}}},{"rowIdx":1384,"cells":{"hash":{"kind":"string","value":"bf735d3e5c7fcb3418b3e0967bd8c1285775e898a5c1749eb3f6aee4b38d4727"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport importlib\nimport numpy as np\n\n__all__ = ['deserialize_class', 'wcs_info_str']\n\n\ndef deserialize_class(tpl, construct=True):\n \"\"\"\n Deserialize classes recursively.\n \"\"\"\n\n if not isinstance(tpl, tuple) or len(tpl) != 3:\n raise ValueError(\"Expected a tuple of three values\")\n\n module, klass = tpl[0].rsplit('.', 1)\n module = importlib.import_module(module)\n klass = getattr(module, klass)\n\n args = tuple(deserialize_class(arg) if isinstance(arg, tuple) else arg for arg in tpl[1])\n\n kwargs = dict((key, deserialize_class(val)) if isinstance(val, tuple) else (key, val) for (key, val) in tpl[2].items())\n\n if 
construct:\n return klass(*args, **kwargs)\n else:\n return klass, args, kwargs\n\n\ndef wcs_info_str(wcs):\n\n # Overall header\n\n s = f'{wcs.__class__.__name__} Transformation\\n\\n'\n s += ('This transformation has {} pixel and {} world dimensions\\n\\n'\n .format(wcs.pixel_n_dim, wcs.world_n_dim))\n s += f'Array shape (Numpy order): {wcs.array_shape}\\n\\n'\n\n # Pixel dimensions table\n\n array_shape = wcs.array_shape or (0,)\n pixel_shape = wcs.pixel_shape or (None,) * wcs.pixel_n_dim\n\n # Find largest between header size and value length\n pixel_dim_width = max(9, len(str(wcs.pixel_n_dim)))\n pixel_nam_width = max(9, max(len(x) for x in wcs.pixel_axis_names))\n pixel_siz_width = max(9, len(str(max(array_shape))))\n\n s += (('{0:' + str(pixel_dim_width) + 's}').format('Pixel Dim') + ' ' +\n ('{0:' + str(pixel_nam_width) + 's}').format('Axis Name') + ' ' +\n ('{0:' + str(pixel_siz_width) + 's}').format('Data size') + ' ' +\n 'Bounds\\n')\n\n for ipix in range(wcs.pixel_n_dim):\n s += (('{0:' + str(pixel_dim_width) + 'g}').format(ipix) + ' ' +\n ('{0:' + str(pixel_nam_width) + 's}').format(wcs.pixel_axis_names[ipix] or 'None') + ' ' +\n (\" \" * 5 + str(None) if pixel_shape[ipix] is None else\n ('{0:' + str(pixel_siz_width) + 'g}').format(pixel_shape[ipix])) + ' ' +\n '{:s}'.format(str(None if wcs.pixel_bounds is None else wcs.pixel_bounds[ipix]) + '\\n'))\n\n s += '\\n'\n\n # World dimensions table\n\n # Find largest between header size and value length\n world_dim_width = max(9, len(str(wcs.world_n_dim)))\n world_nam_width = max(9, max(len(x) if x is not None else 0 for x in wcs.world_axis_names))\n world_typ_width = max(13, max(len(x) if x is not None else 0 for x in wcs.world_axis_physical_types))\n\n s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') + ' ' +\n ('{0:' + str(world_nam_width) + 's}').format('Axis Name') + ' ' +\n ('{0:' + str(world_typ_width) + 's}').format('Physical Type') + ' ' +\n 'Units\\n')\n\n for iwrl in 
range(wcs.world_n_dim):\n\n name = wcs.world_axis_names[iwrl] or 'None'\n typ = wcs.world_axis_physical_types[iwrl] or 'None'\n unit = wcs.world_axis_units[iwrl] or 'unknown'\n\n s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) + ' ' +\n ('{0:' + str(world_nam_width) + 's}').format(name) + ' ' +\n ('{0:' + str(world_typ_width) + 's}').format(typ) + ' ' +\n '{:s}'.format(unit + '\\n'))\n s += '\\n'\n\n # Axis correlation matrix\n\n pixel_dim_width = max(3, len(str(wcs.world_n_dim)))\n\n s += 'Correlation between pixel and world axes:\\n\\n'\n\n s += (' ' * world_dim_width + ' ' +\n ('{0:^' + str(wcs.pixel_n_dim * 5 - 2) + 's}').format('Pixel Dim') +\n '\\n')\n\n s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') +\n ''.join([' ' + ('{0:' + str(pixel_dim_width) + 'd}').format(ipix)\n for ipix in range(wcs.pixel_n_dim)]) +\n '\\n')\n\n matrix = wcs.axis_correlation_matrix\n matrix_str = np.empty(matrix.shape, dtype='U3')\n matrix_str[matrix] = 'yes'\n matrix_str[~matrix] = 'no'\n\n for iwrl in range(wcs.world_n_dim):\n s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) +\n ''.join([' ' + ('{0:>' + str(pixel_dim_width) + 's}').format(matrix_str[iwrl, ipix])\n for ipix in range(wcs.pixel_n_dim)]) +\n '\\n')\n\n # Make sure we get rid of the extra whitespace at the end of some lines\n return '\\n'.join([l.rstrip() for l in s.splitlines()])\n"}}},{"rowIdx":1385,"cells":{"hash":{"kind":"string","value":"c1a891af6c441624e237d08deeb3bdd0d0595fb388c1d83fae3a4d806495fb71"},"content":{"kind":"string","value":"# This file includes the definition of a mix-in class that provides the low-\n# and high-level WCS API to the astropy.wcs.WCS object. 
We keep this code\n# isolated in this mix-in class to avoid making the main wcs.py file too\n# long.\n\nimport warnings\n\nimport numpy as np\n\nfrom astropy import units as u\nfrom astropy.coordinates import SpectralCoord, Galactic, ICRS\nfrom astropy.coordinates.spectral_coordinate import update_differentials_to_match, attach_zero_velocities\nfrom astropy.utils.exceptions import AstropyUserWarning\nfrom astropy.constants import c\n\nfrom .low_level_api import BaseLowLevelWCS\nfrom .high_level_api import HighLevelWCSMixin\nfrom .wrappers import SlicedLowLevelWCS\n\n__all__ = ['custom_ctype_to_ucd_mapping', 'SlicedFITSWCS', 'FITSWCSAPIMixin']\n\nC_SI = c.si.value\n\nVELOCITY_FRAMES = {\n 'GEOCENT': 'gcrs',\n 'BARYCENT': 'icrs',\n 'HELIOCENT': 'hcrs',\n 'LSRK': 'lsrk',\n 'LSRD': 'lsrd'\n}\n\n# The spectra velocity frames below are needed for FITS spectral WCS\n# (see Greisen 06 table 12) but aren't yet defined as real\n# astropy.coordinates frames, so we instead define them here as instances\n# of existing coordinate frames with offset velocities. In future we should\n# make these real frames so that users can more easily recognize these\n# velocity frames when used in SpectralCoord.\n\n# This frame is defined as a velocity of 220 km/s in the\n# direction of l=90, b=0. The rotation velocity is defined\n# in:\n#\n# Kerr and Lynden-Bell 1986, Review of galactic constants.\n#\n# NOTE: this may differ from the assumptions of galcen_v_sun\n# in the Galactocentric frame - the value used here is\n# the one adopted by the WCS standard for spectral\n# transformations.\n\nVELOCITY_FRAMES['GALACTOC'] = Galactic(u=0 * u.km, v=0 * u.km, w=0 * u.km,\n U=0 * u.km / u.s, V=-220 * u.km / u.s, W=0 * u.km / u.s,\n representation_type='cartesian',\n differential_type='cartesian')\n\n# This frame is defined as a velocity of 300 km/s in the\n# direction of l=90, b=0. This is defined in:\n#\n# Transactions of the IAU Vol. 
XVI B Proceedings of the\n# 16th General Assembly, Reports of Meetings of Commissions:\n# Comptes Rendus Des Séances Des Commissions, Commission 28,\n# p201.\n#\n# Note that these values differ from those used by CASA\n# (308 km/s towards l=105, b=-7) but we use the above values\n# since these are the ones defined in Greisen et al (2006).\n\nVELOCITY_FRAMES['LOCALGRP'] = Galactic(u=0 * u.km, v=0 * u.km, w=0 * u.km,\n U=0 * u.km / u.s, V=-300 * u.km / u.s, W=0 * u.km / u.s,\n representation_type='cartesian',\n differential_type='cartesian')\n\n# This frame is defined as a velocity of 368 km/s in the\n# direction of l=263.85, b=48.25. This is defined in:\n#\n# Bennett et al. (2003), First-Year Wilkinson Microwave\n# Anisotropy Probe (WMAP) Observations: Preliminary Maps\n# and Basic Results\n#\n# Note that in that paper, the dipole is expressed as a\n# temperature (T=3.346 +/- 0.017mK)\n\nVELOCITY_FRAMES['CMBDIPOL'] = Galactic(l=263.85 * u.deg, b=48.25 * u.deg, distance=0 * u.km,\n radial_velocity=-(3.346e-3 / 2.725 * c).to(u.km/u.s))\n\n\n# Mapping from CTYPE axis name to UCD1\n\nCTYPE_TO_UCD1 = {\n\n # Celestial coordinates\n 'RA': 'pos.eq.ra',\n 'DEC': 'pos.eq.dec',\n 'GLON': 'pos.galactic.lon',\n 'GLAT': 'pos.galactic.lat',\n 'ELON': 'pos.ecliptic.lon',\n 'ELAT': 'pos.ecliptic.lat',\n 'TLON': 'pos.bodyrc.lon',\n 'TLAT': 'pos.bodyrc.lat',\n 'HPLT': 'custom:pos.helioprojective.lat',\n 'HPLN': 'custom:pos.helioprojective.lon',\n 'HPRZ': 'custom:pos.helioprojective.z',\n 'HGLN': 'custom:pos.heliographic.stonyhurst.lon',\n 'HGLT': 'custom:pos.heliographic.stonyhurst.lat',\n 'CRLN': 'custom:pos.heliographic.carrington.lon',\n 'CRLT': 'custom:pos.heliographic.carrington.lat',\n 'SOLX': 'custom:pos.heliocentric.x',\n 'SOLY': 'custom:pos.heliocentric.y',\n 'SOLZ': 'custom:pos.heliocentric.z',\n\n # Spectral coordinates (WCS paper 3)\n 'FREQ': 'em.freq', # Frequency\n 'ENER': 'em.energy', # Energy\n 'WAVN': 'em.wavenumber', # Wavenumber\n 'WAVE': 'em.wl', # Vacuum 
wavelength\n 'VRAD': 'spect.dopplerVeloc.radio', # Radio velocity\n 'VOPT': 'spect.dopplerVeloc.opt', # Optical velocity\n 'ZOPT': 'src.redshift', # Redshift\n 'AWAV': 'em.wl', # Air wavelength\n 'VELO': 'spect.dopplerVeloc', # Apparent radial velocity\n 'BETA': 'custom:spect.doplerVeloc.beta', # Beta factor (v/c)\n 'STOKES': 'phys.polarization.stokes', # STOKES parameters\n\n # Time coordinates (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)\n 'TIME': 'time',\n 'TAI': 'time',\n 'TT': 'time',\n 'TDT': 'time',\n 'ET': 'time',\n 'IAT': 'time',\n 'UT1': 'time',\n 'UTC': 'time',\n 'GMT': 'time',\n 'GPS': 'time',\n 'TCG': 'time',\n 'TCB': 'time',\n 'TDB': 'time',\n 'LOCAL': 'time',\n\n # Distance coordinates\n 'DIST': 'pos.distance',\n 'DSUN': 'custom:pos.distance.sunToObserver'\n\n # UT() and TT() are handled separately in world_axis_physical_types\n\n}\n\n# Keep a list of additional custom mappings that have been registered. This\n# is kept as a list in case nested context managers are used\nCTYPE_TO_UCD1_CUSTOM = []\n\n\nclass custom_ctype_to_ucd_mapping:\n \"\"\"\n A context manager that makes it possible to temporarily add new CTYPE to\n UCD1+ mapping used by :attr:`FITSWCSAPIMixin.world_axis_physical_types`.\n\n Parameters\n ----------\n mapping : dict\n A dictionary mapping a CTYPE value to a UCD1+ value\n\n Examples\n --------\n\n Consider a WCS with the following CTYPE::\n\n >>> from astropy.wcs import WCS\n >>> wcs = WCS(naxis=1)\n >>> wcs.wcs.ctype = ['SPAM']\n\n By default, :attr:`FITSWCSAPIMixin.world_axis_physical_types` returns `None`,\n but this can be overridden::\n\n >>> wcs.world_axis_physical_types\n [None]\n >>> with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):\n ... 
wcs.world_axis_physical_types\n ['food.spam']\n \"\"\"\n\n def __init__(self, mapping):\n CTYPE_TO_UCD1_CUSTOM.insert(0, mapping)\n self.mapping = mapping\n\n def __enter__(self):\n pass\n\n def __exit__(self, type, value, tb):\n CTYPE_TO_UCD1_CUSTOM.remove(self.mapping)\n\n\nclass SlicedFITSWCS(SlicedLowLevelWCS, HighLevelWCSMixin):\n pass\n\n\nclass FITSWCSAPIMixin(BaseLowLevelWCS, HighLevelWCSMixin):\n \"\"\"\n A mix-in class that is intended to be inherited by the\n :class:`~astropy.wcs.WCS` class and provides the low- and high-level WCS API\n \"\"\"\n\n @property\n def pixel_n_dim(self):\n return self.naxis\n\n @property\n def world_n_dim(self):\n return len(self.wcs.ctype)\n\n @property\n def array_shape(self):\n if self.pixel_shape is None:\n return None\n else:\n return self.pixel_shape[::-1]\n\n @array_shape.setter\n def array_shape(self, value):\n if value is None:\n self.pixel_shape = None\n else:\n self.pixel_shape = value[::-1]\n\n @property\n def pixel_shape(self):\n if self._naxis == [0, 0]:\n return None\n else:\n return tuple(self._naxis)\n\n @pixel_shape.setter\n def pixel_shape(self, value):\n if value is None:\n self._naxis = [0, 0]\n else:\n if len(value) != self.naxis:\n raise ValueError(\"The number of data axes, \"\n \"{}, does not equal the \"\n \"shape {}.\".format(self.naxis, len(value)))\n self._naxis = list(value)\n\n @property\n def pixel_bounds(self):\n return self._pixel_bounds\n\n @pixel_bounds.setter\n def pixel_bounds(self, value):\n if value is None:\n self._pixel_bounds = value\n else:\n if len(value) != self.naxis:\n raise ValueError(\"The number of data axes, \"\n \"{}, does not equal the number of \"\n \"pixel bounds {}.\".format(self.naxis, len(value)))\n self._pixel_bounds = list(value)\n\n @property\n def world_axis_physical_types(self):\n types = []\n # TODO: need to support e.g. 
TT(TAI)\n for ctype in self.wcs.ctype:\n if ctype.upper().startswith(('UT(', 'TT(')):\n types.append('time')\n else:\n ctype_name = ctype.split('-')[0]\n for custom_mapping in CTYPE_TO_UCD1_CUSTOM:\n if ctype_name in custom_mapping:\n types.append(custom_mapping[ctype_name])\n break\n else:\n types.append(CTYPE_TO_UCD1.get(ctype_name.upper(), None))\n return types\n\n @property\n def world_axis_units(self):\n units = []\n for unit in self.wcs.cunit:\n if unit is None:\n unit = ''\n elif isinstance(unit, u.Unit):\n unit = unit.to_string(format='vounit')\n else:\n try:\n unit = u.Unit(unit).to_string(format='vounit')\n except u.UnitsError:\n unit = ''\n units.append(unit)\n return units\n\n @property\n def world_axis_names(self):\n return list(self.wcs.cname)\n\n @property\n def axis_correlation_matrix(self):\n\n # If there are any distortions present, we assume that there may be\n # correlations between all axes. Maybe if some distortions only apply\n # to the image plane we can improve this?\n if self.has_distortion:\n return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)\n\n # Assuming linear world coordinates along each axis, the correlation\n # matrix would be given by whether or not the PC matrix is zero\n matrix = self.wcs.get_pc() != 0\n\n # We now need to check specifically for celestial coordinates since\n # these can assume correlations because of spherical distortions. 
For\n # each celestial coordinate we copy over the pixel dependencies from\n # the other celestial coordinates.\n celestial = (self.wcs.axis_types // 1000) % 10 == 2\n celestial_indices = np.nonzero(celestial)[0]\n for world1 in celestial_indices:\n for world2 in celestial_indices:\n if world1 != world2:\n matrix[world1] |= matrix[world2]\n matrix[world2] |= matrix[world1]\n\n return matrix\n\n def pixel_to_world_values(self, *pixel_arrays):\n world = self.all_pix2world(*pixel_arrays, 0)\n return world[0] if self.world_n_dim == 1 else tuple(world)\n\n def world_to_pixel_values(self, *world_arrays):\n # avoid circular import\n from astropy.wcs.wcs import NoConvergence\n try:\n pixel = self.all_world2pix(*world_arrays, 0)\n except NoConvergence as e:\n warnings.warn(str(e))\n # use best_solution contained in the exception and format the same\n # way as all_world2pix does (using _array_converter)\n pixel = self._array_converter(lambda *args: e.best_solution,\n 'input', *world_arrays, 0)\n\n return pixel[0] if self.pixel_n_dim == 1 else tuple(pixel)\n\n @property\n def world_axis_object_components(self):\n return self._get_components_and_classes()[0]\n\n @property\n def world_axis_object_classes(self):\n return self._get_components_and_classes()[1]\n\n @property\n def serialized_classes(self):\n return False\n\n def _get_components_and_classes(self):\n\n # The aim of this function is to return whatever is needed for\n # world_axis_object_components and world_axis_object_classes. It's easier\n # to figure it out in one go and then return the values and let the\n # properties return part of it.\n\n # Since this method might get called quite a few times, we need to cache\n # it. 
We start off by defining a hash based on the attributes of the\n # WCS that matter here (we can't just use the WCS object as a hash since\n # it is mutable)\n wcs_hash = (self.naxis,\n list(self.wcs.ctype),\n list(self.wcs.cunit),\n self.wcs.radesys,\n self.wcs.specsys,\n self.wcs.equinox,\n self.wcs.dateobs,\n self.wcs.lng,\n self.wcs.lat)\n\n # If the cache is present, we need to check that the 'hash' matches.\n if getattr(self, '_components_and_classes_cache', None) is not None:\n cache = self._components_and_classes_cache\n if cache[0] == wcs_hash:\n return cache[1]\n else:\n self._components_and_classes_cache = None\n\n # Avoid circular imports by importing here\n from astropy.wcs.utils import wcs_to_celestial_frame\n from astropy.coordinates import SkyCoord, EarthLocation\n from astropy.time.formats import FITS_DEPRECATED_SCALES\n from astropy.time import Time, TimeDelta\n\n components = [None] * self.naxis\n classes = {}\n\n # Let's start off by checking whether the WCS has a pair of celestial\n # components\n\n if self.has_celestial:\n\n try:\n celestial_frame = wcs_to_celestial_frame(self)\n except ValueError:\n # Some WCSes, e.g. solar, can be recognized by WCSLIB as being\n # celestial but we don't necessarily have frames for them.\n celestial_frame = None\n else:\n\n kwargs = {}\n kwargs['frame'] = celestial_frame\n kwargs['unit'] = u.deg\n\n classes['celestial'] = (SkyCoord, (), kwargs)\n\n components[self.wcs.lng] = ('celestial', 0, 'spherical.lon.degree')\n components[self.wcs.lat] = ('celestial', 1, 'spherical.lat.degree')\n\n # Next, we check for spectral components\n\n if self.has_spectral:\n\n # Find index of spectral coordinate\n ispec = self.wcs.spec\n ctype = self.wcs.ctype[ispec][:4]\n ctype = ctype.upper()\n\n kwargs = {}\n\n # Determine observer location and velocity\n\n # TODO: determine how WCS standard would deal with observer on a\n # spacecraft far from earth. 
For now assume the obsgeo parameters,\n # if present, give the geocentric observer location.\n\n if np.isnan(self.wcs.obsgeo[0]):\n observer = None\n else:\n earth_location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)\n\n # Get the time scale from TIMESYS or fall back to 'utc'\n tscale = self.wcs.timesys or 'utc'\n\n if np.isnan(self.wcs.mjdavg):\n obstime = Time(self.wcs.mjdobs, format='mjd', scale=tscale,\n location=earth_location)\n else:\n obstime = Time(self.wcs.mjdavg, format='mjd', scale=tscale,\n location=earth_location)\n observer_location = SkyCoord(earth_location.get_itrs(obstime=obstime))\n\n if self.wcs.specsys in VELOCITY_FRAMES:\n frame = VELOCITY_FRAMES[self.wcs.specsys]\n observer = observer_location.transform_to(frame)\n if isinstance(frame, str):\n observer = attach_zero_velocities(observer)\n else:\n observer = update_differentials_to_match(observer_location,\n VELOCITY_FRAMES[self.wcs.specsys],\n preserve_observer_frame=True)\n elif self.wcs.specsys == 'TOPOCENT':\n observer = attach_zero_velocities(observer_location)\n else:\n raise NotImplementedError(f'SPECSYS={self.wcs.specsys} not yet supported')\n\n # Determine target\n\n # This is tricker. In principle the target for each pixel is the\n # celestial coordinates of the pixel, but we then need to be very\n # careful about SSYSOBS which is tricky. 
For now, we set the\n # target using the reference celestial coordinate in the WCS (if\n # any).\n\n if self.has_celestial and celestial_frame is not None:\n\n # NOTE: celestial_frame was defined higher up\n\n # NOTE: we set the distance explicitly to avoid warnings in SpectralCoord\n\n target = SkyCoord(self.wcs.crval[self.wcs.lng] * self.wcs.cunit[self.wcs.lng],\n self.wcs.crval[self.wcs.lat] * self.wcs.cunit[self.wcs.lat],\n frame=celestial_frame,\n distance=1000 * u.kpc)\n\n target = attach_zero_velocities(target)\n\n else:\n\n target = None\n\n # SpectralCoord does not work properly if either observer or target\n # are not convertible to ICRS, so if this is the case, we (for now)\n # drop the observer and target from the SpectralCoord and warn the\n # user.\n\n if observer is not None:\n try:\n observer.transform_to(ICRS())\n except Exception:\n warnings.warn('observer cannot be converted to ICRS, so will '\n 'not be set on SpectralCoord', AstropyUserWarning)\n observer = None\n\n if target is not None:\n try:\n target.transform_to(ICRS())\n except Exception:\n warnings.warn('target cannot be converted to ICRS, so will '\n 'not be set on SpectralCoord', AstropyUserWarning)\n target = None\n\n # NOTE: below we include Quantity in classes['spectral'] instead\n # of SpectralCoord - this is because we want to also be able to\n # accept plain quantities.\n\n if ctype == 'ZOPT':\n\n def spectralcoord_from_redshift(redshift):\n if isinstance(redshift, SpectralCoord):\n return redshift\n return SpectralCoord((redshift + 1) * self.wcs.restwav,\n unit=u.m, observer=observer, target=target)\n\n def redshift_from_spectralcoord(spectralcoord):\n # TODO: check target is consistent between WCS and SpectralCoord,\n # if they are not the transformation doesn't make conceptual sense.\n if (observer is None\n or spectralcoord.observer is None\n or spectralcoord.target is None):\n if observer is None:\n msg = 'No observer defined on WCS'\n elif spectralcoord.observer is None:\n 
msg = 'No observer defined on SpectralCoord'\n else:\n msg = 'No target defined on SpectralCoord'\n warnings.warn(f'{msg}, SpectralCoord '\n 'will be converted without any velocity '\n 'frame change', AstropyUserWarning)\n return spectralcoord.to_value(u.m) / self.wcs.restwav - 1.\n else:\n return spectralcoord.with_observer_stationary_relative_to(observer).to_value(u.m) / self.wcs.restwav - 1.\n\n classes['spectral'] = (u.Quantity, (), {}, spectralcoord_from_redshift)\n components[self.wcs.spec] = ('spectral', 0, redshift_from_spectralcoord)\n\n elif ctype == 'BETA':\n\n def spectralcoord_from_beta(beta):\n if isinstance(beta, SpectralCoord):\n return beta\n return SpectralCoord(beta * C_SI,\n unit=u.m / u.s,\n doppler_convention='relativistic',\n doppler_rest=self.wcs.restwav * u.m,\n observer=observer, target=target)\n\n def beta_from_spectralcoord(spectralcoord):\n # TODO: check target is consistent between WCS and SpectralCoord,\n # if they are not the transformation doesn't make conceptual sense.\n doppler_equiv = u.doppler_relativistic(self.wcs.restwav * u.m)\n if (observer is None\n or spectralcoord.observer is None\n or spectralcoord.target is None):\n if observer is None:\n msg = 'No observer defined on WCS'\n elif spectralcoord.observer is None:\n msg = 'No observer defined on SpectralCoord'\n else:\n msg = 'No target defined on SpectralCoord'\n warnings.warn(f'{msg}, SpectralCoord '\n 'will be converted without any velocity '\n 'frame change', AstropyUserWarning)\n return spectralcoord.to_value(u.m / u.s, doppler_equiv) / C_SI\n else:\n return spectralcoord.with_observer_stationary_relative_to(observer).to_value(u.m / u.s, doppler_equiv) / C_SI\n\n classes['spectral'] = (u.Quantity, (), {}, spectralcoord_from_beta)\n components[self.wcs.spec] = ('spectral', 0, beta_from_spectralcoord)\n\n else:\n\n kwargs['unit'] = self.wcs.cunit[ispec]\n\n if self.wcs.restfrq > 0:\n if ctype == 'VELO':\n kwargs['doppler_convention'] = 'relativistic'\n 
kwargs['doppler_rest'] = self.wcs.restfrq * u.Hz\n elif ctype == 'VRAD':\n kwargs['doppler_convention'] = 'radio'\n kwargs['doppler_rest'] = self.wcs.restfrq * u.Hz\n elif ctype == 'VOPT':\n kwargs['doppler_convention'] = 'optical'\n kwargs['doppler_rest'] = self.wcs.restwav * u.m\n\n def spectralcoord_from_value(value):\n if isinstance(value, SpectralCoord):\n return value\n return SpectralCoord(value, observer=observer, target=target, **kwargs)\n\n def value_from_spectralcoord(spectralcoord):\n # TODO: check target is consistent between WCS and SpectralCoord,\n # if they are not the transformation doesn't make conceptual sense.\n if (observer is None\n or spectralcoord.observer is None\n or spectralcoord.target is None):\n if observer is None:\n msg = 'No observer defined on WCS'\n elif spectralcoord.observer is None:\n msg = 'No observer defined on SpectralCoord'\n else:\n msg = 'No target defined on SpectralCoord'\n warnings.warn(f'{msg}, SpectralCoord '\n 'will be converted without any velocity '\n 'frame change', AstropyUserWarning)\n return spectralcoord.to_value(**kwargs)\n else:\n return spectralcoord.with_observer_stationary_relative_to(observer).to_value(**kwargs)\n\n classes['spectral'] = (u.Quantity, (), {}, spectralcoord_from_value)\n components[self.wcs.spec] = ('spectral', 0, value_from_spectralcoord)\n\n # We can then make sure we correctly return Time objects where appropriate\n # (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)\n\n if 'time' in self.world_axis_physical_types:\n\n multiple_time = self.world_axis_physical_types.count('time') > 1\n\n for i in range(self.naxis):\n\n if self.world_axis_physical_types[i] == 'time':\n\n if multiple_time:\n name = f'time.{i}'\n else:\n name = 'time'\n\n # Initialize delta\n reference_time_delta = None\n\n # Extract time scale\n scale = self.wcs.ctype[i].lower()\n\n if scale == 'time':\n if self.wcs.timesys:\n scale = self.wcs.timesys.lower()\n else:\n scale = 'utc'\n\n # Drop sub-scales\n 
if '(' in scale:\n pos = scale.index('(')\n scale, subscale = scale[:pos], scale[pos+1:-1]\n warnings.warn(f'Dropping unsupported sub-scale '\n f'{subscale.upper()} from scale {scale.upper()}',\n UserWarning)\n\n # TODO: consider having GPS as a scale in Time\n # For now GPS is not a scale, we approximate this by TAI - 19s\n if scale == 'gps':\n reference_time_delta = TimeDelta(19, format='sec')\n scale = 'tai'\n\n elif scale.upper() in FITS_DEPRECATED_SCALES:\n scale = FITS_DEPRECATED_SCALES[scale.upper()]\n\n elif scale not in Time.SCALES:\n raise ValueError(f'Unrecognized time CTYPE={self.wcs.ctype[i]}')\n\n # Determine location\n trefpos = self.wcs.trefpos.lower()\n\n if trefpos.startswith('topocent'):\n # Note that some headers use TOPOCENT instead of TOPOCENTER\n if np.any(np.isnan(self.wcs.obsgeo[:3])):\n warnings.warn('Missing or incomplete observer location '\n 'information, setting location in Time to None',\n UserWarning)\n location = None\n else:\n location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)\n elif trefpos == 'geocenter':\n location = EarthLocation(0, 0, 0, unit=u.m)\n elif trefpos == '':\n location = None\n else:\n # TODO: implement support for more locations when Time supports it\n warnings.warn(f\"Observation location '{trefpos}' is not \"\n \"supported, setting location in Time to None\", UserWarning)\n location = None\n\n reference_time = Time(np.nan_to_num(self.wcs.mjdref[0]),\n np.nan_to_num(self.wcs.mjdref[1]),\n format='mjd', scale=scale,\n location=location)\n\n if reference_time_delta is not None:\n reference_time = reference_time + reference_time_delta\n\n def time_from_reference_and_offset(offset):\n if isinstance(offset, Time):\n return offset\n return reference_time + TimeDelta(offset, format='sec')\n\n def offset_from_time_and_reference(time):\n return (time - reference_time).sec\n\n classes[name] = (Time, (), {}, time_from_reference_and_offset)\n components[i] = (name, 0, offset_from_time_and_reference)\n\n # Fallback: for 
any remaining components that haven't been identified, just\n # return Quantity as the class to use\n\n for i in range(self.naxis):\n if components[i] is None:\n name = self.wcs.ctype[i].split('-')[0].lower()\n if name == '':\n name = 'world'\n while name in classes:\n name += \"_\"\n classes[name] = (u.Quantity, (), {'unit': self.wcs.cunit[i]})\n components[i] = (name, 0, 'value')\n\n # Keep a cached version of result\n self._components_and_classes_cache = wcs_hash, (components, classes)\n\n return components, classes\n"}}},{"rowIdx":1386,"cells":{"hash":{"kind":"string","value":"e029a7fcd54265dec9d892047e844b6a34e1459148454fca12f47bae21000d2e"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport warnings\nfrom contextlib import nullcontext\n\nimport pytest\n\nfrom packaging.version import Version\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_equal, assert_allclose\n\nfrom astropy.utils.data import get_pkg_data_contents, get_pkg_data_filename\nfrom astropy.utils.exceptions import AstropyUserWarning\nfrom astropy.time import Time\nfrom astropy import units as u\nfrom astropy.utils import unbroadcast\nfrom astropy.coordinates import SkyCoord, EarthLocation, ITRS\nfrom astropy.units import Quantity\nfrom astropy.io import fits\n\nfrom astropy.wcs import _wcs # noqa\nfrom astropy.wcs.wcs import (WCS, Sip, WCSSUB_LONGITUDE, WCSSUB_LATITUDE,\n FITSFixedWarning)\nfrom astropy.wcs.wcsapi.fitswcs import SlicedFITSWCS\nfrom astropy.wcs.utils import (proj_plane_pixel_scales,\n is_proj_plane_distorted,\n non_celestial_pixel_scales,\n wcs_to_celestial_frame,\n celestial_frame_to_wcs, skycoord_to_pixel,\n pixel_to_skycoord, custom_wcs_to_frame_mappings,\n custom_frame_to_wcs_mappings,\n add_stokes_axis_to_wcs,\n pixel_to_pixel,\n _split_matrix,\n _pixel_to_pixel_correlation_matrix,\n _pixel_to_world_correlation_matrix,\n local_partial_pixel_derivatives,\n fit_wcs_from_points,\n 
obsgeo_to_frame)\nfrom astropy.utils.compat.optional_deps import HAS_SCIPY # noqa\n\n\ndef test_wcs_dropping():\n wcs = WCS(naxis=4)\n wcs.wcs.pc = np.zeros([4, 4])\n np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))\n pc = wcs.wcs.pc # for later use below\n\n dropped = wcs.dropaxis(0)\n assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))\n dropped = wcs.dropaxis(1)\n assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))\n dropped = wcs.dropaxis(2)\n assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))\n dropped = wcs.dropaxis(3)\n assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))\n\n wcs = WCS(naxis=4)\n wcs.wcs.cd = pc\n\n dropped = wcs.dropaxis(0)\n assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))\n dropped = wcs.dropaxis(1)\n assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))\n dropped = wcs.dropaxis(2)\n assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))\n dropped = wcs.dropaxis(3)\n assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))\n\n\ndef test_wcs_swapping():\n wcs = WCS(naxis=4)\n wcs.wcs.pc = np.zeros([4, 4])\n np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))\n pc = wcs.wcs.pc # for later use below\n\n swapped = wcs.swapaxes(0, 1)\n assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4]))\n swapped = wcs.swapaxes(0, 3)\n assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1]))\n swapped = wcs.swapaxes(2, 3)\n assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3]))\n\n wcs = WCS(naxis=4)\n wcs.wcs.cd = pc\n\n swapped = wcs.swapaxes(0, 1)\n assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4]))\n swapped = wcs.swapaxes(0, 3)\n assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1]))\n swapped = wcs.swapaxes(2, 3)\n assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3]))\n\n\n@pytest.mark.parametrize('ndim', (2, 
3))\ndef test_add_stokes(ndim):\n wcs = WCS(naxis=ndim)\n\n for ii in range(ndim + 1):\n outwcs = add_stokes_axis_to_wcs(wcs, ii)\n assert outwcs.wcs.naxis == ndim + 1\n assert outwcs.wcs.ctype[ii] == 'STOKES'\n assert outwcs.wcs.cname[ii] == 'STOKES'\n\n\ndef test_slice():\n mywcs = WCS(naxis=2)\n mywcs.wcs.crval = [1, 1]\n mywcs.wcs.cdelt = [0.1, 0.1]\n mywcs.wcs.crpix = [1, 1]\n mywcs._naxis = [1000, 500]\n pscale = 0.1 # from cdelt\n\n slice_wcs = mywcs.slice([slice(1, None), slice(0, None)])\n assert np.all(slice_wcs.wcs.crpix == np.array([1, 0]))\n assert slice_wcs._naxis == [1000, 499]\n\n # test that CRPIX maps to CRVAL:\n assert_allclose(\n slice_wcs.wcs_pix2world(*slice_wcs.wcs.crpix, 1),\n slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale\n )\n\n slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)])\n assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))\n assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))\n assert slice_wcs._naxis == [250, 250]\n\n slice_wcs = mywcs.slice([slice(None, None, 2), slice(0, None, 2)])\n assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.2]))\n assert slice_wcs._naxis == [500, 250]\n\n # Non-integral values do not alter the naxis attribute\n with pytest.warns(AstropyUserWarning):\n slice_wcs = mywcs.slice([slice(50.), slice(20.)])\n assert slice_wcs._naxis == [1000, 500]\n with pytest.warns(AstropyUserWarning):\n slice_wcs = mywcs.slice([slice(50.), slice(20)])\n assert slice_wcs._naxis == [20, 500]\n with pytest.warns(AstropyUserWarning):\n slice_wcs = mywcs.slice([slice(50), slice(20.5)])\n assert slice_wcs._naxis == [1000, 50]\n\n\ndef test_slice_with_sip():\n mywcs = WCS(naxis=2)\n mywcs.wcs.crval = [1, 1]\n mywcs.wcs.cdelt = [0.1, 0.1]\n mywcs.wcs.crpix = [1, 1]\n mywcs._naxis = [1000, 500]\n mywcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']\n a = np.array(\n [[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],\n [0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],\n 
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],\n [-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],\n [-2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]\n )\n b = np.array(\n [[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],\n [0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],\n [6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],\n [3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],\n [-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]\n )\n mywcs.sip = Sip(a, b, None, None, mywcs.wcs.crpix)\n mywcs.wcs.set()\n pscale = 0.1 # from cdelt\n\n slice_wcs = mywcs.slice([slice(1, None), slice(0, None)])\n # test that CRPIX maps to CRVAL:\n assert_allclose(\n slice_wcs.all_pix2world(*slice_wcs.wcs.crpix, 1),\n slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale\n )\n\n slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)])\n # test that CRPIX maps to CRVAL:\n assert_allclose(\n slice_wcs.all_pix2world(*slice_wcs.wcs.crpix, 1),\n slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale\n )\n\n\ndef test_slice_getitem():\n mywcs = WCS(naxis=2)\n mywcs.wcs.crval = [1, 1]\n mywcs.wcs.cdelt = [0.1, 0.1]\n mywcs.wcs.crpix = [1, 1]\n\n slice_wcs = mywcs[1::2, 0::4]\n assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))\n assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))\n\n mywcs.wcs.crpix = [2, 2]\n slice_wcs = mywcs[1::2, 0::4]\n assert np.all(slice_wcs.wcs.crpix == np.array([0.875, 0.75]))\n assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))\n\n # Default: numpy order\n slice_wcs = mywcs[1::2]\n assert np.all(slice_wcs.wcs.crpix == np.array([2, 0.75]))\n assert np.all(slice_wcs.wcs.cdelt == np.array([0.1, 0.2]))\n\n\ndef test_slice_fitsorder():\n mywcs = WCS(naxis=2)\n mywcs.wcs.crval = [1, 1]\n mywcs.wcs.cdelt = [0.1, 0.1]\n mywcs.wcs.crpix = [1, 1]\n\n slice_wcs = mywcs.slice([slice(1, None), slice(0, None)], numpy_order=False)\n assert np.all(slice_wcs.wcs.crpix == np.array([0, 1]))\n\n slice_wcs = mywcs.slice([slice(1, None, 2), 
slice(0, None, 4)], numpy_order=False)\n assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 0.625]))\n assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.4]))\n\n slice_wcs = mywcs.slice([slice(1, None, 2)], numpy_order=False)\n assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 1]))\n assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.1]))\n\n\ndef test_slice_wcs():\n mywcs = WCS(naxis=2)\n\n sub = mywcs[0]\n assert isinstance(sub, SlicedFITSWCS)\n\n with pytest.raises(IndexError) as exc:\n mywcs[0, ::2]\n assert exc.value.args[0] == \"Slicing WCS with a step is not supported.\"\n\n\ndef test_axis_names():\n mywcs = WCS(naxis=4)\n mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT-LSR', 'STOKES']\n\n assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES']\n\n mywcs.wcs.cname = ['RA', 'DEC', 'VOPT', 'STOKES']\n\n assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES']\n\n\ndef test_celestial():\n mywcs = WCS(naxis=4)\n mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT', 'STOKES']\n cel = mywcs.celestial\n assert tuple(cel.wcs.ctype) == ('RA---TAN', 'DEC--TAN')\n assert cel.axis_type_names == ['RA', 'DEC']\n\n\ndef test_wcs_to_celestial_frame():\n\n # Import astropy.coordinates here to avoid circular imports\n from astropy.coordinates.builtin_frames import ICRS, ITRS, FK5, FK4, Galactic\n\n mywcs = WCS(naxis=2)\n mywcs.wcs.set()\n with pytest.raises(ValueError, match=\"Could not determine celestial frame \"\n \"corresponding to the specified WCS object\"):\n assert wcs_to_celestial_frame(mywcs) is None\n\n mywcs = WCS(naxis=2)\n mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET']\n mywcs.wcs.set()\n with pytest.raises(ValueError):\n assert wcs_to_celestial_frame(mywcs) is None\n\n mywcs = WCS(naxis=2)\n mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n mywcs.wcs.set()\n frame = wcs_to_celestial_frame(mywcs)\n assert isinstance(frame, ICRS)\n\n mywcs = WCS(naxis=2)\n mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n mywcs.wcs.equinox = 1987.\n mywcs.wcs.set()\n 
print(mywcs.to_header())\n frame = wcs_to_celestial_frame(mywcs)\n assert isinstance(frame, FK5)\n assert frame.equinox == Time(1987., format='jyear')\n\n mywcs = WCS(naxis=2)\n mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n mywcs.wcs.equinox = 1982\n mywcs.wcs.set()\n frame = wcs_to_celestial_frame(mywcs)\n assert isinstance(frame, FK4)\n assert frame.equinox == Time(1982., format='byear')\n\n mywcs = WCS(naxis=2)\n mywcs.wcs.ctype = ['GLON-SIN', 'GLAT-SIN']\n mywcs.wcs.set()\n frame = wcs_to_celestial_frame(mywcs)\n assert isinstance(frame, Galactic)\n\n mywcs = WCS(naxis=2)\n mywcs.wcs.ctype = ['TLON-CAR', 'TLAT-CAR']\n mywcs.wcs.dateobs = '2017-08-17T12:41:04.430'\n mywcs.wcs.set()\n frame = wcs_to_celestial_frame(mywcs)\n assert isinstance(frame, ITRS)\n assert frame.obstime == Time('2017-08-17T12:41:04.430')\n\n for equinox in [np.nan, 1987, 1982]:\n mywcs = WCS(naxis=2)\n mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n mywcs.wcs.radesys = 'ICRS'\n mywcs.wcs.equinox = equinox\n mywcs.wcs.set()\n frame = wcs_to_celestial_frame(mywcs)\n assert isinstance(frame, ICRS)\n\n # Flipped order\n mywcs = WCS(naxis=2)\n mywcs.wcs.ctype = ['DEC--TAN', 'RA---TAN']\n mywcs.wcs.set()\n frame = wcs_to_celestial_frame(mywcs)\n assert isinstance(frame, ICRS)\n\n # More than two dimensions\n mywcs = WCS(naxis=3)\n mywcs.wcs.ctype = ['DEC--TAN', 'VELOCITY', 'RA---TAN']\n mywcs.wcs.set()\n frame = wcs_to_celestial_frame(mywcs)\n assert isinstance(frame, ICRS)\n\n mywcs = WCS(naxis=3)\n mywcs.wcs.ctype = ['GLAT-CAR', 'VELOCITY', 'GLON-CAR']\n mywcs.wcs.set()\n frame = wcs_to_celestial_frame(mywcs)\n assert isinstance(frame, Galactic)\n\n\ndef test_wcs_to_celestial_frame_correlated():\n\n # Regression test for a bug that caused wcs_to_celestial_frame to fail when\n # the celestial axes were correlated with other axes.\n\n # Import astropy.coordinates here to avoid circular imports\n from astropy.coordinates.builtin_frames import ICRS\n\n mywcs = WCS(naxis=3)\n mywcs.wcs.ctype = 
'RA---TAN', 'DEC--TAN', 'FREQ'\n mywcs.wcs.cd = np.ones((3, 3))\n mywcs.wcs.set()\n frame = wcs_to_celestial_frame(mywcs)\n assert isinstance(frame, ICRS)\n\n\ndef test_wcs_to_celestial_frame_extend():\n\n mywcs = WCS(naxis=2)\n mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET']\n mywcs.wcs.set()\n with pytest.raises(ValueError):\n wcs_to_celestial_frame(mywcs)\n\n class OffsetFrame:\n pass\n\n def identify_offset(wcs):\n if wcs.wcs.ctype[0].endswith('OFFSET') and wcs.wcs.ctype[1].endswith('OFFSET'):\n return OffsetFrame()\n\n with custom_wcs_to_frame_mappings(identify_offset):\n frame = wcs_to_celestial_frame(mywcs)\n assert isinstance(frame, OffsetFrame)\n\n # Check that things are back to normal after the context manager\n with pytest.raises(ValueError):\n wcs_to_celestial_frame(mywcs)\n\n\ndef test_celestial_frame_to_wcs():\n\n # Import astropy.coordinates here to avoid circular imports\n from astropy.coordinates import ICRS, ITRS, FK5, FK4, FK4NoETerms, Galactic, BaseCoordinateFrame\n\n class FakeFrame(BaseCoordinateFrame):\n pass\n\n frame = FakeFrame()\n with pytest.raises(ValueError) as exc:\n celestial_frame_to_wcs(frame)\n assert exc.value.args[0] == (\"Could not determine WCS corresponding to \"\n \"the specified coordinate frame.\")\n\n frame = ICRS()\n mywcs = celestial_frame_to_wcs(frame)\n mywcs.wcs.set()\n assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')\n assert mywcs.wcs.radesys == 'ICRS'\n assert np.isnan(mywcs.wcs.equinox)\n assert mywcs.wcs.lonpole == 180\n assert mywcs.wcs.latpole == 0\n\n frame = FK5(equinox='J1987')\n mywcs = celestial_frame_to_wcs(frame)\n assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')\n assert mywcs.wcs.radesys == 'FK5'\n assert mywcs.wcs.equinox == 1987.\n\n frame = FK4(equinox='B1982')\n mywcs = celestial_frame_to_wcs(frame)\n assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')\n assert mywcs.wcs.radesys == 'FK4'\n assert mywcs.wcs.equinox == 1982.\n\n frame = FK4NoETerms(equinox='B1982')\n mywcs = 
celestial_frame_to_wcs(frame)\n assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')\n assert mywcs.wcs.radesys == 'FK4-NO-E'\n assert mywcs.wcs.equinox == 1982.\n\n frame = Galactic()\n mywcs = celestial_frame_to_wcs(frame)\n assert tuple(mywcs.wcs.ctype) == ('GLON-TAN', 'GLAT-TAN')\n assert mywcs.wcs.radesys == ''\n assert np.isnan(mywcs.wcs.equinox)\n\n frame = Galactic()\n mywcs = celestial_frame_to_wcs(frame, projection='CAR')\n assert tuple(mywcs.wcs.ctype) == ('GLON-CAR', 'GLAT-CAR')\n assert mywcs.wcs.radesys == ''\n assert np.isnan(mywcs.wcs.equinox)\n\n frame = Galactic()\n mywcs = celestial_frame_to_wcs(frame, projection='CAR')\n mywcs.wcs.crval = [100, -30]\n mywcs.wcs.set()\n assert_allclose((mywcs.wcs.lonpole, mywcs.wcs.latpole), (180, 60))\n\n frame = ITRS(obstime=Time('2017-08-17T12:41:04.43'))\n mywcs = celestial_frame_to_wcs(frame, projection='CAR')\n assert tuple(mywcs.wcs.ctype) == ('TLON-CAR', 'TLAT-CAR')\n assert mywcs.wcs.radesys == 'ITRS'\n assert mywcs.wcs.dateobs == '2017-08-17T12:41:04.430'\n\n frame = ITRS()\n mywcs = celestial_frame_to_wcs(frame, projection='CAR')\n assert tuple(mywcs.wcs.ctype) == ('TLON-CAR', 'TLAT-CAR')\n assert mywcs.wcs.radesys == 'ITRS'\n assert mywcs.wcs.dateobs == Time('J2000').utc.fits\n\n\ndef test_celestial_frame_to_wcs_extend():\n\n class OffsetFrame:\n pass\n\n frame = OffsetFrame()\n\n with pytest.raises(ValueError):\n celestial_frame_to_wcs(frame)\n\n def identify_offset(frame, projection=None):\n if isinstance(frame, OffsetFrame):\n wcs = WCS(naxis=2)\n wcs.wcs.ctype = ['XOFFSET', 'YOFFSET']\n return wcs\n\n with custom_frame_to_wcs_mappings(identify_offset):\n mywcs = celestial_frame_to_wcs(frame)\n assert tuple(mywcs.wcs.ctype) == ('XOFFSET', 'YOFFSET')\n\n # Check that things are back to normal after the context manager\n with pytest.raises(ValueError):\n celestial_frame_to_wcs(frame)\n\n\ndef test_pixscale_nodrop():\n mywcs = WCS(naxis=2)\n mywcs.wcs.cdelt = [0.1, 0.2]\n mywcs.wcs.ctype = 
['RA---TAN', 'DEC--TAN']\n assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))\n\n mywcs.wcs.cdelt = [-0.1, 0.2]\n assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))\n\n\ndef test_pixscale_withdrop():\n mywcs = WCS(naxis=3)\n mywcs.wcs.cdelt = [0.1, 0.2, 1]\n mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT']\n assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2))\n\n mywcs.wcs.cdelt = [-0.1, 0.2, 1]\n assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2))\n\n\ndef test_pixscale_cd():\n mywcs = WCS(naxis=2)\n mywcs.wcs.cd = [[-0.1, 0], [0, 0.2]]\n mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))\n\n\n@pytest.mark.parametrize('angle',\n (30, 45, 60, 75))\ndef test_pixscale_cd_rotated(angle):\n mywcs = WCS(naxis=2)\n rho = np.radians(angle)\n scale = 0.1\n mywcs.wcs.cd = [[scale * np.cos(rho), -scale * np.sin(rho)],\n [scale * np.sin(rho), scale * np.cos(rho)]]\n mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))\n\n\n@pytest.mark.parametrize('angle',\n (30, 45, 60, 75))\ndef test_pixscale_pc_rotated(angle):\n mywcs = WCS(naxis=2)\n rho = np.radians(angle)\n scale = 0.1\n mywcs.wcs.cdelt = [-scale, scale]\n mywcs.wcs.pc = [[np.cos(rho), -np.sin(rho)],\n [np.sin(rho), np.cos(rho)]]\n mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))\n\n\n@pytest.mark.parametrize(('cdelt', 'pc', 'pccd'),\n (([0.1, 0.2], np.eye(2), np.diag([0.1, 0.2])),\n ([0.1, 0.2, 0.3], np.eye(3), np.diag([0.1, 0.2, 0.3])),\n ([1, 1, 1], np.diag([0.1, 0.2, 0.3]), np.diag([0.1, 0.2, 0.3]))))\ndef test_pixel_scale_matrix(cdelt, pc, pccd):\n\n mywcs = WCS(naxis=(len(cdelt)))\n mywcs.wcs.cdelt = cdelt\n mywcs.wcs.pc = pc\n\n assert_almost_equal(mywcs.pixel_scale_matrix, pccd)\n\n\n@pytest.mark.parametrize(('ctype', 'cel'),\n ((['RA---TAN', 'DEC--TAN'], True),\n 
(['RA---TAN', 'DEC--TAN', 'FREQ'], False),\n (['RA---TAN', 'FREQ'], False),))\ndef test_is_celestial(ctype, cel):\n mywcs = WCS(naxis=len(ctype))\n mywcs.wcs.ctype = ctype\n\n assert mywcs.is_celestial == cel\n\n\n@pytest.mark.parametrize(('ctype', 'cel'),\n ((['RA---TAN', 'DEC--TAN'], True),\n (['RA---TAN', 'DEC--TAN', 'FREQ'], True),\n (['RA---TAN', 'FREQ'], False),))\ndef test_has_celestial(ctype, cel):\n mywcs = WCS(naxis=len(ctype))\n mywcs.wcs.ctype = ctype\n\n assert mywcs.has_celestial == cel\n\n\ndef test_has_celestial_correlated():\n # Regression test for astropy/astropy#8416 - has_celestial failed when\n # celestial axes were correlated with other axes.\n mywcs = WCS(naxis=3)\n mywcs.wcs.ctype = 'RA---TAN', 'DEC--TAN', 'FREQ'\n mywcs.wcs.cd = np.ones((3, 3))\n mywcs.wcs.set()\n assert mywcs.has_celestial\n\n\n@pytest.mark.parametrize(('cdelt', 'pc', 'cd', 'check_warning'),\n ((np.array([0.1, 0.2]), np.eye(2), np.eye(2), True),\n (np.array([1, 1]), np.diag([0.1, 0.2]), np.eye(2), True),\n (np.array([0.1, 0.2]), np.eye(2), None, False),\n (np.array([0.1, 0.2]), None, np.eye(2), True),\n ))\ndef test_noncelestial_scale(cdelt, pc, cd, check_warning):\n\n mywcs = WCS(naxis=2)\n if cd is not None:\n mywcs.wcs.cd = cd\n if pc is not None:\n mywcs.wcs.pc = pc\n\n # TODO: Some inputs emit RuntimeWarning from here onwards.\n # Fix the test data. 
See @nden's comment in PR 9010.\n if check_warning:\n ctx = pytest.warns()\n else:\n ctx = nullcontext()\n with ctx as warning_lines:\n mywcs.wcs.cdelt = cdelt\n if check_warning:\n for w in warning_lines:\n assert issubclass(w.category, RuntimeWarning)\n assert 'cdelt will be ignored since cd is present' in str(w.message)\n\n mywcs.wcs.ctype = ['RA---TAN', 'FREQ']\n\n ps = non_celestial_pixel_scales(mywcs)\n\n assert_almost_equal(ps.to_value(u.deg), np.array([0.1, 0.2]))\n\n\n@pytest.mark.parametrize('mode', ['all', 'wcs'])\ndef test_skycoord_to_pixel(mode):\n\n # Import astropy.coordinates here to avoid circular imports\n from astropy.coordinates import SkyCoord\n\n header = get_pkg_data_contents('data/maps/1904-66_TAN.hdr', encoding='binary')\n wcs = WCS(header)\n\n ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs')\n\n xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)\n\n # WCS is in FK5 so we need to transform back to ICRS\n new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs')\n\n assert_allclose(new.ra.degree, ref.ra.degree)\n assert_allclose(new.dec.degree, ref.dec.degree)\n\n # Make sure you can specify a different class using ``cls`` keyword\n class SkyCoord2(SkyCoord):\n pass\n\n new2 = pixel_to_skycoord(xp, yp, wcs, mode=mode,\n cls=SkyCoord2).transform_to('icrs')\n\n assert new2.__class__ is SkyCoord2\n assert_allclose(new2.ra.degree, ref.ra.degree)\n assert_allclose(new2.dec.degree, ref.dec.degree)\n\n\ndef test_skycoord_to_pixel_swapped():\n\n # Regression test for a bug that caused skycoord_to_pixel and\n # pixel_to_skycoord to not work correctly if the axes were swapped in the\n # WCS.\n\n # Import astropy.coordinates here to avoid circular imports\n from astropy.coordinates import SkyCoord\n\n header = get_pkg_data_contents('data/maps/1904-66_TAN.hdr', encoding='binary')\n wcs = WCS(header)\n\n wcs_swapped = wcs.sub([WCSSUB_LATITUDE, WCSSUB_LONGITUDE])\n\n ref = SkyCoord(0.1 * u.deg, -89. 
* u.deg, frame='icrs')\n\n xp1, yp1 = skycoord_to_pixel(ref, wcs)\n xp2, yp2 = skycoord_to_pixel(ref, wcs_swapped)\n\n assert_allclose(xp1, xp2)\n assert_allclose(yp1, yp2)\n\n # WCS is in FK5 so we need to transform back to ICRS\n new1 = pixel_to_skycoord(xp1, yp1, wcs).transform_to('icrs')\n new2 = pixel_to_skycoord(xp1, yp1, wcs_swapped).transform_to('icrs')\n\n assert_allclose(new1.ra.degree, new2.ra.degree)\n assert_allclose(new1.dec.degree, new2.dec.degree)\n\n\ndef test_is_proj_plane_distorted():\n # non-orthogonal CD:\n wcs = WCS(naxis=2)\n wcs.wcs.cd = [[-0.1, 0], [0, 0.2]]\n wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n assert(is_proj_plane_distorted(wcs))\n\n # almost orthogonal CD:\n wcs.wcs.cd = [[0.1 + 2.0e-7, 1.7e-7], [1.2e-7, 0.1 - 1.3e-7]]\n assert(not is_proj_plane_distorted(wcs))\n\n # real case:\n header = get_pkg_data_filename('data/sip.fits')\n with pytest.warns(FITSFixedWarning):\n wcs = WCS(header)\n assert(is_proj_plane_distorted(wcs))\n\n\n@pytest.mark.parametrize('mode', ['all', 'wcs'])\ndef test_skycoord_to_pixel_distortions(mode):\n\n # Import astropy.coordinates here to avoid circular imports\n from astropy.coordinates import SkyCoord\n\n header = get_pkg_data_filename('data/sip.fits')\n with pytest.warns(FITSFixedWarning):\n wcs = WCS(header)\n\n ref = SkyCoord(202.50 * u.deg, 47.19 * u.deg, frame='icrs')\n\n xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)\n\n # WCS is in FK5 so we need to transform back to ICRS\n new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs')\n\n assert_allclose(new.ra.degree, ref.ra.degree)\n assert_allclose(new.dec.degree, ref.dec.degree)\n\n\n@pytest.fixture\ndef spatial_wcs_2d_small_angle():\n \"\"\"\n This WCS has an almost linear correlation between the pixel and world axes\n close to the reference pixel.\n \"\"\"\n wcs = WCS(naxis=2)\n wcs.wcs.ctype = ['HPLN-TAN', 'HPLT-TAN']\n wcs.wcs.crpix = [3.0] * 2\n wcs.wcs.cdelt = [0.002] * 2\n wcs.wcs.crval = [0] * 2\n wcs.wcs.set()\n return 
wcs\n\n\ndef test_local_pixel_derivatives(spatial_wcs_2d_small_angle):\n not_diag = np.logical_not(np.diag([1, 1]))\n # At (or close to) the reference pixel this should equal the cdelt\n derivs = local_partial_pixel_derivatives(spatial_wcs_2d_small_angle, 3, 3)\n np.testing.assert_allclose(np.diag(derivs), spatial_wcs_2d_small_angle.wcs.cdelt)\n np.testing.assert_allclose(derivs[not_diag].flat, [0, 0], atol=1e-10)\n\n # Far away from the reference pixel this should not equal the cdelt\n derivs = local_partial_pixel_derivatives(spatial_wcs_2d_small_angle, 3e4, 3e4)\n assert not np.allclose(np.diag(derivs), spatial_wcs_2d_small_angle.wcs.cdelt)\n\n # At (or close to) the reference pixel this should equal the cdelt\n derivs = local_partial_pixel_derivatives(\n spatial_wcs_2d_small_angle, 3, 3, normalize_by_world=True)\n np.testing.assert_allclose(np.diag(derivs), [1, 1])\n np.testing.assert_allclose(derivs[not_diag].flat, [0, 0], atol=1e-8)\n\n\ndef test_pixel_to_world_correlation_matrix_celestial():\n\n wcs = WCS(naxis=2)\n wcs.wcs.ctype = 'RA---TAN', 'DEC--TAN'\n wcs.wcs.set()\n\n assert_equal(wcs.axis_correlation_matrix, [[1, 1], [1, 1]])\n matrix, classes = _pixel_to_world_correlation_matrix(wcs)\n assert_equal(matrix, [[1, 1]])\n assert classes == [SkyCoord]\n\n\ndef test_pixel_to_world_correlation_matrix_spectral_cube_uncorrelated():\n\n wcs = WCS(naxis=3)\n wcs.wcs.ctype = 'RA---TAN', 'FREQ', 'DEC--TAN'\n wcs.wcs.set()\n\n assert_equal(wcs.axis_correlation_matrix, [[1, 0, 1], [0, 1, 0], [1, 0, 1]])\n matrix, classes = _pixel_to_world_correlation_matrix(wcs)\n assert_equal(matrix, [[1, 0, 1], [0, 1, 0]])\n assert classes == [SkyCoord, Quantity]\n\n\ndef test_pixel_to_world_correlation_matrix_spectral_cube_correlated():\n\n wcs = WCS(naxis=3)\n wcs.wcs.ctype = 'RA---TAN', 'FREQ', 'DEC--TAN'\n wcs.wcs.cd = np.ones((3, 3))\n wcs.wcs.set()\n\n assert_equal(wcs.axis_correlation_matrix, [[1, 1, 1], [1, 1, 1], [1, 1, 1]])\n matrix, classes = 
_pixel_to_world_correlation_matrix(wcs)\n assert_equal(matrix, [[1, 1, 1], [1, 1, 1]])\n assert classes == [SkyCoord, Quantity]\n\n\ndef test_pixel_to_pixel_correlation_matrix_celestial():\n\n wcs_in = WCS(naxis=2)\n wcs_in.wcs.ctype = 'RA---TAN', 'DEC--TAN'\n wcs_in.wcs.set()\n\n wcs_out = WCS(naxis=2)\n wcs_out.wcs.ctype = 'DEC--TAN', 'RA---TAN'\n wcs_out.wcs.set()\n\n matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)\n assert_equal(matrix, [[1, 1], [1, 1]])\n\n\ndef test_pixel_to_pixel_correlation_matrix_spectral_cube_uncorrelated():\n\n wcs_in = WCS(naxis=3)\n wcs_in.wcs.ctype = 'RA---TAN', 'DEC--TAN', 'FREQ'\n wcs_in.wcs.set()\n\n wcs_out = WCS(naxis=3)\n wcs_out.wcs.ctype = 'DEC--TAN', 'FREQ', 'RA---TAN'\n wcs_out.wcs.set()\n\n matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)\n assert_equal(matrix, [[1, 1, 0], [0, 0, 1], [1, 1, 0]])\n\n\ndef test_pixel_to_pixel_correlation_matrix_spectral_cube_correlated():\n\n # NOTE: only make one of the WCSes have correlated axes to really test this\n\n wcs_in = WCS(naxis=3)\n wcs_in.wcs.ctype = 'RA---TAN', 'DEC--TAN', 'FREQ'\n wcs_in.wcs.set()\n\n wcs_out = WCS(naxis=3)\n wcs_out.wcs.ctype = 'DEC--TAN', 'FREQ', 'RA---TAN'\n wcs_out.wcs.cd = np.ones((3, 3))\n wcs_out.wcs.set()\n\n matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)\n assert_equal(matrix, [[1, 1, 1], [1, 1, 1], [1, 1, 1]])\n\n\ndef test_pixel_to_pixel_correlation_matrix_mismatch():\n\n wcs_in = WCS(naxis=2)\n wcs_in.wcs.ctype = 'RA---TAN', 'DEC--TAN'\n wcs_in.wcs.set()\n\n wcs_out = WCS(naxis=3)\n wcs_out.wcs.ctype = 'DEC--TAN', 'FREQ', 'RA---TAN'\n wcs_out.wcs.set()\n\n with pytest.raises(ValueError) as exc:\n _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)\n assert exc.value.args[0] == \"The two WCS return a different number of world coordinates\"\n\n wcs3 = WCS(naxis=2)\n wcs3.wcs.ctype = 'FREQ', 'PIXEL'\n wcs3.wcs.set()\n\n with pytest.raises(ValueError) as exc:\n _pixel_to_pixel_correlation_matrix(wcs_out, wcs3)\n 
assert exc.value.args[0] == \"The world coordinate types of the two WCS do not match\"\n\n wcs4 = WCS(naxis=4)\n wcs4.wcs.ctype = 'RA---TAN', 'DEC--TAN', 'Q1', 'Q2'\n wcs4.wcs.cunit = ['deg', 'deg', 'm/s', 'm/s']\n wcs4.wcs.set()\n\n wcs5 = WCS(naxis=4)\n wcs5.wcs.ctype = 'Q1', 'RA---TAN', 'DEC--TAN', 'Q2'\n wcs5.wcs.cunit = ['m/s', 'deg', 'deg', 'm/s']\n wcs5.wcs.set()\n\n with pytest.raises(ValueError, match=\"World coordinate order doesn't match \"\n \"and automatic matching is ambiguous\"):\n _pixel_to_pixel_correlation_matrix(wcs4, wcs5)\n\n\ndef test_pixel_to_pixel_correlation_matrix_nonsquare():\n\n # Here we set up an input WCS that maps 3 pixel coordinates to 4 world\n # coordinates - the idea is to make sure that things work fine in cases\n # where the number of input and output pixel coordinates do not match.\n\n class FakeWCS:\n pass\n\n wcs_in = FakeWCS()\n wcs_in.low_level_wcs = wcs_in\n wcs_in.pixel_n_dim = 3\n wcs_in.world_n_dim = 4\n wcs_in.axis_correlation_matrix = [[True, True, False],\n [True, True, False],\n [True, True, False],\n [False, False, True]]\n wcs_in.world_axis_object_components = [('spat', 'ra', 'ra.degree'),\n ('spat', 'dec', 'dec.degree'),\n ('spec', 0, 'value'),\n ('time', 0, 'utc.value')]\n wcs_in.world_axis_object_classes = {'spat': ('astropy.coordinates.SkyCoord', (),\n {'frame': 'icrs'}),\n 'spec': ('astropy.units.Wavelength', (None,), {}),\n 'time': ('astropy.time.Time', (None,),\n {'format': 'mjd', 'scale': 'utc'})}\n\n wcs_out = FakeWCS()\n wcs_out.low_level_wcs = wcs_out\n wcs_out.pixel_n_dim = 4\n wcs_out.world_n_dim = 4\n wcs_out.axis_correlation_matrix = [[True, False, False, False],\n [False, True, True, False],\n [False, True, True, False],\n [False, False, False, True]]\n wcs_out.world_axis_object_components = [('spec', 0, 'value'),\n ('spat', 'ra', 'ra.degree'),\n ('spat', 'dec', 'dec.degree'),\n ('time', 0, 'utc.value')]\n wcs_out.world_axis_object_classes = wcs_in.world_axis_object_classes\n\n matrix = 
_pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)\n\n matrix = matrix.astype(int)\n\n # The shape should be (n_pixel_out, n_pixel_in)\n assert matrix.shape == (4, 3)\n\n expected = np.array([[1, 1, 0], [1, 1, 0], [1, 1, 0], [0, 0, 1]])\n assert_equal(matrix, expected)\n\n\ndef test_split_matrix():\n\n assert _split_matrix(np.array([[1]])) == [([0], [0])]\n\n assert _split_matrix(np.array([[1, 1],\n [1, 1]])) == [([0, 1], [0, 1])]\n\n assert _split_matrix(np.array([[1, 1, 0],\n [1, 1, 0],\n [0, 0, 1]])) == [([0, 1], [0, 1]), ([2], [2])]\n\n assert _split_matrix(np.array([[0, 1, 0],\n [1, 0, 0],\n [0, 0, 1]])) == [([0], [1]), ([1], [0]), ([2], [2])]\n\n assert _split_matrix(np.array([[0, 1, 1],\n [1, 0, 0],\n [1, 0, 1]])) == [([0, 1, 2], [0, 1, 2])]\n\n\ndef test_pixel_to_pixel():\n\n wcs_in = WCS(naxis=3)\n wcs_in.wcs.ctype = 'DEC--TAN', 'FREQ', 'RA---TAN'\n wcs_in.wcs.set()\n\n wcs_out = WCS(naxis=3)\n wcs_out.wcs.ctype = 'GLON-CAR', 'GLAT-CAR', 'FREQ'\n wcs_out.wcs.set()\n\n # First try with scalars\n with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):\n x, y, z = pixel_to_pixel(wcs_in, wcs_out, 1, 2, 3)\n assert x.shape == ()\n assert y.shape == ()\n assert z.shape == ()\n\n # Now try with broadcasted arrays\n x = np.linspace(10, 20, 10)\n y = np.linspace(10, 20, 20)\n z = np.linspace(10, 20, 30)\n Z1, Y1, X1 = np.meshgrid(z, y, x, indexing='ij', copy=False)\n with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):\n X2, Y2, Z2 = pixel_to_pixel(wcs_in, wcs_out, X1, Y1, Z1)\n\n # The final arrays should have the correct shape\n assert X2.shape == (30, 20, 10)\n assert Y2.shape == (30, 20, 10)\n assert Z2.shape == (30, 20, 10)\n\n # But behind the scenes should also be broadcasted\n assert unbroadcast(X2).shape == (30, 1, 10)\n assert unbroadcast(Y2).shape == (30, 1, 10)\n assert unbroadcast(Z2).shape == (20, 1)\n\n # We can put the values back through the function to ensure round-tripping\n with 
pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):\n X3, Y3, Z3 = pixel_to_pixel(wcs_out, wcs_in, X2, Y2, Z2)\n\n # The final arrays should have the correct shape\n assert X2.shape == (30, 20, 10)\n assert Y2.shape == (30, 20, 10)\n assert Z2.shape == (30, 20, 10)\n\n # But behind the scenes should also be broadcasted\n assert unbroadcast(X3).shape == (30, 1, 10)\n assert unbroadcast(Y3).shape == (20, 1)\n assert unbroadcast(Z3).shape == (30, 1, 10)\n\n # And these arrays should match the input\n assert_allclose(X1, X3)\n assert_allclose(Y1, Y3)\n assert_allclose(Z1, Z3)\n\n\ndef test_pixel_to_pixel_correlated():\n\n wcs_in = WCS(naxis=2)\n wcs_in.wcs.ctype = 'DEC--TAN', 'RA---TAN'\n wcs_in.wcs.set()\n\n wcs_out = WCS(naxis=2)\n wcs_out.wcs.ctype = 'GLON-CAR', 'GLAT-CAR'\n wcs_out.wcs.set()\n\n # First try with scalars\n x, y = pixel_to_pixel(wcs_in, wcs_out, 1, 2)\n assert x.shape == ()\n assert y.shape == ()\n\n # Now try with broadcasted arrays\n x = np.linspace(10, 20, 10)\n y = np.linspace(10, 20, 20)\n Y1, X1 = np.meshgrid(y, x, indexing='ij', copy=False)\n Y2, X2 = pixel_to_pixel(wcs_in, wcs_out, X1, Y1)\n\n # The final arrays should have the correct shape\n assert X2.shape == (20, 10)\n assert Y2.shape == (20, 10)\n\n # and there are no efficiency gains here since the celestial axes are correlated\n assert unbroadcast(X2).shape == (20, 10)\n\n\ndef test_pixel_to_pixel_1d():\n\n # Simple test to make sure that when WCS only returns one world coordinate\n # this still works correctly (since this requires special treatment behind\n # the scenes).\n\n wcs_in = WCS(naxis=1)\n wcs_in.wcs.ctype = 'COORD1',\n wcs_in.wcs.cunit = 'nm',\n wcs_in.wcs.set()\n\n wcs_out = WCS(naxis=1)\n wcs_out.wcs.ctype = 'COORD2',\n wcs_out.wcs.cunit = 'cm',\n wcs_out.wcs.set()\n\n # First try with a scalar\n x = pixel_to_pixel(wcs_in, wcs_out, 1)\n assert x.shape == ()\n\n # Next with a regular array\n x = np.linspace(10, 20, 10)\n x = pixel_to_pixel(wcs_in, 
wcs_out, x)\n assert x.shape == (10,)\n\n # And now try with a broadcasted array\n x = np.broadcast_to(np.linspace(10, 20, 10), (4, 10))\n x = pixel_to_pixel(wcs_in, wcs_out, x)\n assert x.shape == (4, 10)\n\n # The broadcasting of the input should be retained\n assert unbroadcast(x).shape == (10,)\n\n\nheader_str_linear = \"\"\"\nXTENSION= 'IMAGE ' / Image extension\nBITPIX = -32 / array data type\nNAXIS = 2 / number of array dimensions\nNAXIS1 = 50\nNAXIS2 = 50\nPCOUNT = 0 / number of parameters\nGCOUNT = 1 / number of groups\nRADESYS = 'ICRS '\nEQUINOX = 2000.0\nWCSAXES = 2\nCTYPE1 = 'RA---TAN'\nCTYPE2 = 'DEC--TAN'\nCRVAL1 = 250.3497414839765\nCRVAL2 = 2.280925599609063\nCRPIX1 = 1045.0\nCRPIX2 = 1001.0\nCD1_1 = -0.005564478186178\nCD1_2 = -0.001042099258152\nCD2_1 = 0.00118144146585\nCD2_2 = -0.005590816683583\n\"\"\"\nheader_str_sip = \"\"\"\nXTENSION= 'IMAGE ' / Image extension\nBITPIX = -32 / array data type\nNAXIS = 2 / number of array dimensions\nNAXIS1 = 50\nNAXIS2 = 50\nPCOUNT = 0 / number of parameters\nGCOUNT = 1 / number of groups\nRADESYS = 'ICRS '\nEQUINOX = 2000.0\nWCSAXES = 2\nCTYPE1 = 'RA---TAN-SIP'\nCTYPE2 = 'DEC--TAN-SIP'\nCRVAL1 = 250.3497414839765\nCRVAL2 = 2.280925599609063\nCRPIX1 = 1045.0\nCRPIX2 = 1001.0\nCD1_1 = -0.005564478186178\nCD1_2 = -0.001042099258152\nCD2_1 = 0.00118144146585\nCD2_2 = -0.005590816683583\nA_ORDER = 2\nB_ORDER = 2\nA_2_0 = 2.02451189234E-05\nA_0_2 = 3.317603337918E-06\nA_1_1 = 1.73456334971071E-05\nB_2_0 = 3.331330003472E-06\nB_0_2 = 2.04247482482589E-05\nB_1_1 = 1.71476710804143E-05\nAP_ORDER= 2\nBP_ORDER= 2\nAP_1_0 = 0.000904700296389636\nAP_0_1 = 0.000627660715584716\nAP_2_0 = -2.023482905861E-05\nAP_0_2 = -3.332285841011E-06\nAP_1_1 = -1.731636633824E-05\nBP_1_0 = 0.000627960882053211\nBP_0_1 = 0.000911222886084808\nBP_2_0 = -3.343918167224E-06\nBP_0_2 = -2.041598249021E-05\nBP_1_1 = -1.711876336719E-05\nA_DMAX = 44.72893589844534\nB_DMAX = 44.62692873032506\n\"\"\"\nheader_str_prob = \"\"\"\nNAXIS = 2 / number 
of array dimensions\nWCSAXES = 2 / Number of coordinate axes\nCRPIX1 = 1024.5 / Pixel coordinate of reference point\nCRPIX2 = 1024.5 / Pixel coordinate of reference point\nCD1_1 = -1.7445934400771E-05 / Coordinate transformation matrix element\nCD1_2 = -4.9826985362578E-08 / Coordinate transformation matrix element\nCD2_1 = -5.0068838822312E-08 / Coordinate transformation matrix element\nCD2_2 = 1.7530614610951E-05 / Coordinate transformation matrix element\nCTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection\nCTYPE2 = 'DEC--TAN' / Declination, gnomonic projection\nCRVAL1 = 5.8689341666667 / [deg] Coordinate value at reference point\nCRVAL2 = -71.995508583333 / [deg] Coordinate value at reference point\n\"\"\"\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\n@pytest.mark.parametrize(\n 'header_str,crval,sip_degree,user_proj_point,exp_max_dist,exp_std_dist',\n [\n # simple testset no distortions\n (header_str_linear, 250.3497414839765, None, False, 7e-5*u.deg, 2.5e-5*u.deg),\n # simple testset with distortions\n (header_str_sip, 250.3497414839765, 2, False, 7e-6*u.deg, 2.5e-6*u.deg),\n # testset with problematic WCS header that failed before\n (header_str_prob, 5.8689341666667, None, False, 7e-6*u.deg, 2.5e-6*u.deg),\n # simple testset no distortions, user defined center\n (header_str_linear, 250.3497414839765, None, True, 7e-5*u.deg, 2.5e-5*u.deg),\n # 360->0 degree crossover, simple testset no distortions\n (header_str_linear, 352.3497414839765, None, False, 7e-5*u.deg, 2.5e-5*u.deg),\n # 360->0 degree crossover, simple testset with distortions\n (header_str_sip, 352.3497414839765, 2, False, 7e-6*u.deg, 2.5e-6*u.deg),\n # 360->0 degree crossover, testset with problematic WCS header that failed before\n (header_str_prob, 352.3497414839765, None, False, 7e-6*u.deg, 2.5e-6*u.deg),\n # 360->0 degree crossover, simple testset no distortions, user defined center\n (header_str_linear, 352.3497414839765, None, True, 7e-5*u.deg, 2.5e-5*u.deg),\n ])\ndef 
test_fit_wcs_from_points(header_str, crval, sip_degree, user_proj_point,\n exp_max_dist, exp_std_dist):\n header = fits.Header.fromstring(header_str, sep='\\n')\n header[\"CRVAL1\"] = crval\n\n true_wcs = WCS(header, relax=True)\n\n # Getting the pixel coordinates\n x, y = np.meshgrid(list(range(10)), list(range(10)))\n x = x.flatten()\n y = y.flatten()\n\n # Calculating the true sky positions\n world_pix = true_wcs.pixel_to_world(x, y)\n\n # which projection point to use\n if user_proj_point:\n proj_point = world_pix[0]\n projlon = proj_point.data.lon.deg\n projlat = proj_point.data.lat.deg\n else:\n proj_point = 'center'\n\n # Fitting the wcs\n fit_wcs = fit_wcs_from_points((x, y), world_pix,\n proj_point=proj_point,\n sip_degree=sip_degree)\n\n # Validate that the true sky coordinates\n # match sky coordinates calculated from the wcs fit\n world_pix_new = fit_wcs.pixel_to_world(x, y)\n\n dists = world_pix.separation(world_pix_new)\n\n assert dists.max() < exp_max_dist\n assert np.std(dists) < exp_std_dist\n\n if user_proj_point:\n assert (fit_wcs.wcs.crval == [projlon, projlat]).all()\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\ndef test_fit_wcs_from_points_CRPIX_bounds():\n # Test CRPIX bounds requirement\n wcs_str = \"\"\"\nWCSAXES = 2 / Number of coordinate axes\nCRPIX1 = 1045.0 / Pixel coordinate of reference point\nCRPIX2 = 1001.0 / Pixel coordinate of reference point\nPC1_1 = 0.00056205870415378 / Coordinate transformation matrix element\nPC1_2 = -0.00569181083243 / Coordinate transformation matrix element\nPC2_1 = 0.0056776810932466 / Coordinate transformation matrix element\nPC2_2 = 0.0004208048403273 / Coordinate transformation matrix element\nCDELT1 = 1.0 / [deg] Coordinate increment at reference point\nCDELT2 = 1.0 / [deg] Coordinate increment at reference point\nCUNIT1 = 'deg' / Units of coordinate increment and value\nCUNIT2 = 'deg' / Units of coordinate increment and value\nCTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection\nCTYPE2 = 
'DEC--TAN' / Declination, gnomonic projection\nCRVAL1 = 104.57797893504 / [deg] Coordinate value at reference point\nCRVAL2 = -74.195502593322 / [deg] Coordinate value at reference point\nLONPOLE = 180.0 / [deg] Native longitude of celestial pole\nLATPOLE = -74.195502593322 / [deg] Native latitude of celestial pole\nTIMESYS = 'TDB' / Time scale\nTIMEUNIT= 'd' / Time units\nDATEREF = '1858-11-17' / ISO-8601 fiducial time\nMJDREFI = 0.0 / [d] MJD of fiducial time, integer part\nMJDREFF = 0.0 / [d] MJD of fiducial time, fractional part\nDATE-OBS= '2019-03-27T03:30:13.832Z' / ISO-8601 time of observation\nMJD-OBS = 58569.145993426 / [d] MJD of observation\nMJD-OBS = 58569.145993426 / [d] MJD at start of observation\nTSTART = 1569.6467941661 / [d] Time elapsed since fiducial time at start\nDATE-END= '2019-03-27T04:00:13.831Z' / ISO-8601 time at end of observation\nMJD-END = 58569.166826748 / [d] MJD at end of observation\nTSTOP = 1569.6676274905 / [d] Time elapsed since fiducial time at end\nTELAPSE = 0.02083332443 / [d] Elapsed time (start to stop)\nTIMEDEL = 0.020833333333333 / [d] Time resolution\nTIMEPIXR= 0.5 / Reference position of timestamp in binned data\nRADESYS = 'ICRS' / Equatorial coordinate system\n\"\"\"\n wcs_header = fits.Header.fromstring(wcs_str, sep='\\n')\n ffi_wcs = WCS(wcs_header)\n\n yi, xi = (1000, 1000)\n y, x = (10, 200)\n\n center_coord = SkyCoord(ffi_wcs.all_pix2world([[xi+x//2, yi+y//2]], 0), unit='deg')[0]\n ypix, xpix = (arr.flatten() for arr in np.mgrid[xi : xi + x, yi : yi + y])\n world_pix = SkyCoord(*ffi_wcs.all_pix2world(xpix, ypix, 0), unit='deg')\n\n fit_wcs = fit_wcs_from_points((ypix, xpix), world_pix, proj_point='center')\n\n assert (fit_wcs.wcs.crpix.astype(int) == [1100, 1005]).all()\n assert fit_wcs.pixel_shape == (1199, 1009)\n\n\n@pytest.mark.skipif('not HAS_SCIPY')\ndef test_issue10991():\n # test issue #10991 (it just needs to run and set the user defined crval)\n xy = np.array([[1766.88276168, 662.96432257, 171.50212526, 
120.70924648],\n [1706.69832901, 1788.85480559, 1216.98949653, 1307.41843381]])\n world_coords = SkyCoord([(66.3542367, 22.20000162), (67.15416174, 19.18042906),\n (65.73375432, 17.54251555), (66.02400512, 17.44413253)],\n frame=\"icrs\", unit=\"deg\")\n proj_point = SkyCoord(64.67514918, 19.63389538,\n frame=\"icrs\", unit=\"deg\")\n\n fit_wcs = fit_wcs_from_points(\n xy=xy,\n world_coords=world_coords,\n proj_point=proj_point,\n projection='TAN'\n )\n projlon = proj_point.data.lon.deg\n projlat = proj_point.data.lat.deg\n assert (fit_wcs.wcs.crval == [projlon, projlat]).all()\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize('x_in,y_in', [[0, 0], [np.arange(5), np.arange(5)]])\ndef test_pixel_to_world_itrs(x_in, y_in):\n \"\"\"Regression test for https://github.com/astropy/astropy/pull/9609\"\"\"\n if Version(_wcs.__version__) >= Version('7.4'):\n ctx = pytest.warns(\n FITSFixedWarning,\n match=r\"'datfix' made the change 'Set MJD-OBS to 57982\\.528524 from DATE-OBS'\\.\")\n else:\n ctx = nullcontext()\n\n with ctx:\n wcs = WCS({'NAXIS': 2,\n 'CTYPE1': 'TLON-CAR',\n 'CTYPE2': 'TLAT-CAR',\n 'RADESYS': 'ITRS ',\n 'DATE-OBS': '2017-08-17T12:41:04.444'})\n\n # This shouldn't raise an exception.\n coord = wcs.pixel_to_world(x_in, y_in)\n\n # Check round trip transformation.\n x, y = wcs.world_to_pixel(coord)\n\n np.testing.assert_almost_equal(x, x_in)\n np.testing.assert_almost_equal(y, y_in)\n\n\n@pytest.fixture\ndef dkist_location():\n return EarthLocation(*(-5466045.25695494, -2404388.73741278, 2242133.88769004) * u.m)\n\n\ndef test_obsgeo_cartesian(dkist_location):\n obstime = Time(\"2021-05-21T03:00:00\")\n wcs = WCS(naxis=2)\n wcs.wcs.obsgeo = list(dkist_location.to_value(u.m).tolist()) + [0, 0, 0]\n wcs.wcs.dateobs = obstime.isot\n\n frame = obsgeo_to_frame(wcs.wcs.obsgeo, obstime)\n\n assert isinstance(frame, ITRS)\n assert frame.x == dkist_location.x\n assert frame.y == dkist_location.y\n assert frame.z == dkist_location.z\n\n\ndef 
test_obsgeo_spherical(dkist_location):\n obstime = Time(\"2021-05-21T03:00:00\")\n dkist_location = dkist_location.get_itrs(obstime)\n loc_sph = dkist_location.spherical\n\n wcs = WCS(naxis=2)\n wcs.wcs.obsgeo = [0, 0, 0] + [loc_sph.lon.value, loc_sph.lat.value, loc_sph.distance.value]\n wcs.wcs.dateobs = obstime.isot\n\n frame = obsgeo_to_frame(wcs.wcs.obsgeo, obstime)\n\n assert isinstance(frame, ITRS)\n assert u.allclose(frame.x, dkist_location.x)\n assert u.allclose(frame.y, dkist_location.y)\n assert u.allclose(frame.z, dkist_location.z)\n\n\ndef test_obsgeo_infinite(dkist_location):\n obstime = Time(\"2021-05-21T03:00:00\")\n dkist_location = dkist_location.get_itrs(obstime)\n loc_sph = dkist_location.spherical\n\n wcs = WCS(naxis=2)\n wcs.wcs.obsgeo = [1, 1, np.nan] + [loc_sph.lon.value, loc_sph.lat.value, loc_sph.distance.value]\n wcs.wcs.dateobs = obstime.isot\n wcs.wcs.set()\n\n frame = obsgeo_to_frame(wcs.wcs.obsgeo, obstime)\n\n assert isinstance(frame, ITRS)\n assert u.allclose(frame.x, dkist_location.x)\n assert u.allclose(frame.y, dkist_location.y)\n assert u.allclose(frame.z, dkist_location.z)\n\n\n@pytest.mark.parametrize(\"obsgeo\", ([np.nan] * 6, None, [0] * 6, [54] * 5))\ndef test_obsgeo_invalid(obsgeo):\n\n with pytest.raises(ValueError):\n obsgeo_to_frame(obsgeo, None)\n"}}},{"rowIdx":1387,"cells":{"hash":{"kind":"string","value":"3f5b9be6f8a45ba4fdad55b3f41928d7ca432913d9f69e1dc8fbb005224f95d1"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport io\nimport os\nfrom contextlib import nullcontext\nfrom datetime import datetime\n\nfrom packaging.version import Version\nimport pytest\nimport numpy as np\nfrom numpy.testing import (\n assert_allclose, assert_array_almost_equal, assert_array_almost_equal_nulp,\n assert_array_equal)\n\nfrom astropy import wcs\nfrom astropy.wcs import _wcs # noqa\nfrom astropy import units as u\nfrom astropy.utils.data import (\n get_pkg_data_filenames, 
get_pkg_data_contents, get_pkg_data_filename)\nfrom astropy.utils.misc import NumpyRNGContext\nfrom astropy.utils.exceptions import (\n AstropyUserWarning, AstropyWarning, AstropyDeprecationWarning)\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy.io import fits\nfrom astropy.coordinates import SkyCoord\nfrom astropy.nddata import Cutout2D\n\n_WCSLIB_VER = Version(_wcs.__version__)\n\n\n# NOTE: User can choose to use system wcslib instead of bundled.\ndef ctx_for_v71_dateref_warnings():\n if _WCSLIB_VER >= Version('7.1') and _WCSLIB_VER < Version('7.3'):\n ctx = pytest.warns(\n wcs.FITSFixedWarning,\n match=r\"'datfix' made the change 'Set DATE-REF to '1858-11-17' from MJD-REF'\\.\")\n else:\n ctx = nullcontext()\n return ctx\n\n\nclass TestMaps:\n def setup(self):\n # get the list of the hdr files that we want to test\n self._file_list = list(get_pkg_data_filenames(\n \"data/maps\", pattern=\"*.hdr\"))\n\n def test_consistency(self):\n # Check to see that we actually have the list we expect, so that we\n # do not get in a situation where the list is empty or incomplete and\n # the tests still seem to pass correctly.\n\n # how many do we expect to see?\n n_data_files = 28\n\n assert len(self._file_list) == n_data_files, (\n \"test_spectra has wrong number data files: found {}, expected \"\n \" {}\".format(len(self._file_list), n_data_files))\n\n def test_maps(self):\n for filename in self._file_list:\n # use the base name of the file, so we get more useful messages\n # for failing tests.\n filename = os.path.basename(filename)\n # Now find the associated file in the installed wcs test directory.\n header = get_pkg_data_contents(\n os.path.join(\"data\", \"maps\", filename), encoding='binary')\n # finally run the test.\n wcsobj = wcs.WCS(header)\n world = wcsobj.wcs_pix2world([[97, 97]], 1)\n assert_array_almost_equal(world, [[285.0, -66.25]], decimal=1)\n pix = wcsobj.wcs_world2pix([[285.0, -66.25]], 1)\n assert_array_almost_equal(pix, [[97, 
97]], decimal=0)\n\n\nclass TestSpectra:\n def setup(self):\n self._file_list = list(get_pkg_data_filenames(\"data/spectra\",\n pattern=\"*.hdr\"))\n\n def test_consistency(self):\n # Check to see that we actually have the list we expect, so that we\n # do not get in a situation where the list is empty or incomplete and\n # the tests still seem to pass correctly.\n\n # how many do we expect to see?\n n_data_files = 6\n\n assert len(self._file_list) == n_data_files, (\n \"test_spectra has wrong number data files: found {}, expected \"\n \" {}\".format(len(self._file_list), n_data_files))\n\n def test_spectra(self):\n for filename in self._file_list:\n # use the base name of the file, so we get more useful messages\n # for failing tests.\n filename = os.path.basename(filename)\n # Now find the associated file in the installed wcs test directory.\n header = get_pkg_data_contents(\n os.path.join(\"data\", \"spectra\", filename), encoding='binary')\n # finally run the test.\n if _WCSLIB_VER >= Version('7.4'):\n ctx = pytest.warns(\n wcs.FITSFixedWarning,\n match=r\"'datfix' made the change 'Set MJD-OBS to 53925\\.853472 from DATE-OBS'\\.\") # noqa\n else:\n ctx = nullcontext()\n with ctx:\n all_wcs = wcs.find_all_wcs(header)\n\n assert len(all_wcs) == 9\n\n\ndef test_fixes():\n \"\"\"\n From github issue #36\n \"\"\"\n header = get_pkg_data_contents('data/nonstandard_units.hdr', encoding='binary')\n\n with pytest.raises(wcs.InvalidTransformError), pytest.warns(wcs.FITSFixedWarning) as w:\n wcs.WCS(header, translate_units='dhs')\n\n if Version('7.4') <= _WCSLIB_VER < Version('7.6'):\n assert len(w) == 3\n assert \"'datfix' made the change 'Success'.\" in str(w.pop().message)\n else:\n assert len(w) == 2\n\n first_wmsg = str(w[0].message)\n assert 'unitfix' in first_wmsg and 'Hz' in first_wmsg and 'M/S' in first_wmsg\n assert 'plane angle' in str(w[1].message) and 'm/s' in str(w[1].message)\n\n\n# Ignore \"PV2_2 = 0.209028857410973 invalid keyvalue\" warning seen on 
Windows.\n@pytest.mark.filterwarnings(r'ignore:PV2_2')\ndef test_outside_sky():\n \"\"\"\n From github issue #107\n \"\"\"\n header = get_pkg_data_contents(\n 'data/outside_sky.hdr', encoding='binary')\n w = wcs.WCS(header)\n\n assert np.all(np.isnan(w.wcs_pix2world([[100., 500.]], 0))) # outside sky\n assert np.all(np.isnan(w.wcs_pix2world([[200., 200.]], 0))) # outside sky\n assert not np.any(np.isnan(w.wcs_pix2world([[1000., 1000.]], 0)))\n\n\ndef test_pix2world():\n \"\"\"\n From github issue #1463\n \"\"\"\n # TODO: write this to test the expected output behavior of pix2world,\n # currently this just makes sure it doesn't error out in unexpected ways\n # (and compares `wcs.pc` and `result` values?)\n filename = get_pkg_data_filename('data/sip2.fits')\n with pytest.warns(wcs.FITSFixedWarning) as caught_warnings:\n # this raises a warning unimportant for this testing the pix2world\n # FITSFixedWarning(u'The WCS transformation has more axes (2) than\n # the image it is associated with (0)')\n ww = wcs.WCS(filename)\n\n # might as well monitor for changing behavior\n if Version('7.4') <= _WCSLIB_VER < Version('7.6'):\n assert len(caught_warnings) == 2\n else:\n assert len(caught_warnings) == 1\n\n n = 3\n pixels = (np.arange(n) * np.ones((2, n))).T\n result = ww.wcs_pix2world(pixels, 0, ra_dec_order=True)\n\n # Catch #2791\n ww.wcs_pix2world(pixels[..., 0], pixels[..., 1], 0, ra_dec_order=True)\n\n # assuming that the data of sip2.fits doesn't change\n answer = np.array([[0.00024976, 0.00023018],\n [0.00023043, -0.00024997]])\n\n assert np.allclose(ww.wcs.pc, answer, atol=1.e-8)\n\n answer = np.array([[202.39265216, 47.17756518],\n [202.39335826, 47.17754619],\n [202.39406436, 47.1775272]])\n\n assert np.allclose(result, answer, atol=1.e-8, rtol=1.e-10)\n\n\ndef test_load_fits_path():\n fits_name = get_pkg_data_filename('data/sip.fits')\n with pytest.warns(wcs.FITSFixedWarning):\n wcs.WCS(fits_name)\n\n\ndef test_dict_init():\n \"\"\"\n Test that WCS can be 
initialized with a dict-like object\n \"\"\"\n\n # Dictionary with no actual WCS, returns identity transform\n with ctx_for_v71_dateref_warnings():\n w = wcs.WCS({})\n\n xp, yp = w.wcs_world2pix(41., 2., 1)\n\n assert_array_almost_equal_nulp(xp, 41., 10)\n assert_array_almost_equal_nulp(yp, 2., 10)\n\n # Valid WCS\n hdr = {\n 'CTYPE1': 'GLON-CAR',\n 'CTYPE2': 'GLAT-CAR',\n 'CUNIT1': 'deg',\n 'CUNIT2': 'deg',\n 'CRPIX1': 1,\n 'CRPIX2': 1,\n 'CRVAL1': 40.,\n 'CRVAL2': 0.,\n 'CDELT1': -0.1,\n 'CDELT2': 0.1\n }\n if _WCSLIB_VER >= Version('7.1'):\n hdr['DATEREF'] = '1858-11-17'\n\n if _WCSLIB_VER >= Version('7.4'):\n ctx = pytest.warns(\n wcs.wcs.FITSFixedWarning,\n match=r\"'datfix' made the change 'Set MJDREF to 0\\.000000 from DATEREF'\\.\")\n else:\n ctx = nullcontext()\n\n with ctx:\n w = wcs.WCS(hdr)\n\n xp, yp = w.wcs_world2pix(41., 2., 0)\n\n assert_array_almost_equal_nulp(xp, -10., 10)\n assert_array_almost_equal_nulp(yp, 20., 10)\n\n\ndef test_extra_kwarg():\n \"\"\"\n Issue #444\n \"\"\"\n w = wcs.WCS()\n with NumpyRNGContext(123456789):\n data = np.random.rand(100, 2)\n with pytest.raises(TypeError):\n w.wcs_pix2world(data, origin=1)\n\n\ndef test_3d_shapes():\n \"\"\"\n Issue #444\n \"\"\"\n w = wcs.WCS(naxis=3)\n with NumpyRNGContext(123456789):\n data = np.random.rand(100, 3)\n result = w.wcs_pix2world(data, 1)\n assert result.shape == (100, 3)\n result = w.wcs_pix2world(\n data[..., 0], data[..., 1], data[..., 2], 1)\n assert len(result) == 3\n\n\ndef test_preserve_shape():\n w = wcs.WCS(naxis=2)\n\n x = np.random.random((2, 3, 4))\n y = np.random.random((2, 3, 4))\n\n xw, yw = w.wcs_pix2world(x, y, 1)\n\n assert xw.shape == (2, 3, 4)\n assert yw.shape == (2, 3, 4)\n\n xp, yp = w.wcs_world2pix(x, y, 1)\n\n assert xp.shape == (2, 3, 4)\n assert yp.shape == (2, 3, 4)\n\n\ndef test_broadcasting():\n w = wcs.WCS(naxis=2)\n\n x = np.random.random((2, 3, 4))\n y = 1\n\n xp, yp = w.wcs_world2pix(x, y, 1)\n\n assert xp.shape == (2, 3, 4)\n assert yp.shape == 
(2, 3, 4)\n\n\ndef test_shape_mismatch():\n w = wcs.WCS(naxis=2)\n\n x = np.random.random((2, 3, 4))\n y = np.random.random((3, 2, 4))\n\n with pytest.raises(ValueError) as exc:\n xw, yw = w.wcs_pix2world(x, y, 1)\n assert exc.value.args[0] == \"Coordinate arrays are not broadcastable to each other\"\n\n with pytest.raises(ValueError) as exc:\n xp, yp = w.wcs_world2pix(x, y, 1)\n assert exc.value.args[0] == \"Coordinate arrays are not broadcastable to each other\"\n\n # There are some ambiguities that need to be worked around when\n # naxis == 1\n w = wcs.WCS(naxis=1)\n\n x = np.random.random((42, 1))\n xw = w.wcs_pix2world(x, 1)\n assert xw.shape == (42, 1)\n\n x = np.random.random((42,))\n xw, = w.wcs_pix2world(x, 1)\n assert xw.shape == (42,)\n\n\ndef test_invalid_shape():\n # Issue #1395\n w = wcs.WCS(naxis=2)\n\n xy = np.random.random((2, 3))\n with pytest.raises(ValueError) as exc:\n w.wcs_pix2world(xy, 1)\n assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)'\n\n xy = np.random.random((2, 1))\n with pytest.raises(ValueError) as exc:\n w.wcs_pix2world(xy, 1)\n assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)'\n\n\ndef test_warning_about_defunct_keywords():\n header = get_pkg_data_contents('data/defunct_keywords.hdr', encoding='binary')\n if Version('7.4') <= _WCSLIB_VER < Version('7.6'):\n n_warn = 5\n else:\n n_warn = 4\n\n # Make sure the warnings come out every time...\n for _ in range(2):\n with pytest.warns(wcs.FITSFixedWarning) as w:\n wcs.WCS(header)\n\n assert len(w) == n_warn\n # 7.4 adds a fifth warning \"'datfix' made the change 'Success'.\"\n for item in w[:4]:\n assert 'PCi_ja' in str(item.message)\n\n\ndef test_warning_about_defunct_keywords_exception():\n header = get_pkg_data_contents('data/defunct_keywords.hdr', encoding='binary')\n with pytest.warns(wcs.FITSFixedWarning):\n wcs.WCS(header)\n\n\ndef test_to_header_string():\n hdrstr = (\n \"WCSAXES = 2 / 
Number of coordinate axes \",\n \"CRPIX1 = 0.0 / Pixel coordinate of reference point \",\n \"CRPIX2 = 0.0 / Pixel coordinate of reference point \",\n \"CDELT1 = 1.0 / Coordinate increment at reference point \",\n \"CDELT2 = 1.0 / Coordinate increment at reference point \",\n \"CRVAL1 = 0.0 / Coordinate value at reference point \",\n \"CRVAL2 = 0.0 / Coordinate value at reference point \",\n \"LATPOLE = 90.0 / [deg] Native latitude of celestial pole \",\n )\n\n if _WCSLIB_VER >= Version('7.3'):\n hdrstr += (\n \"MJDREF = 0.0 / [d] MJD of fiducial time \",\n )\n\n elif _WCSLIB_VER >= Version('7.1'):\n hdrstr += (\n \"DATEREF = '1858-11-17' / ISO-8601 fiducial time \",\n \"MJDREFI = 0.0 / [d] MJD of fiducial time, integer part \",\n \"MJDREFF = 0.0 / [d] MJD of fiducial time, fractional part \"\n )\n\n hdrstr += (\"END\", )\n\n header_string = ''.join(hdrstr)\n\n w = wcs.WCS()\n h0 = fits.Header.fromstring(w.to_header_string().strip())\n if 'COMMENT' in h0:\n del h0['COMMENT']\n if '' in h0:\n del h0['']\n h1 = fits.Header.fromstring(header_string.strip())\n assert dict(h0) == dict(h1)\n\n\ndef test_to_fits():\n nrec = 11 if _WCSLIB_VER >= Version('7.1') else 8\n if _WCSLIB_VER < Version('7.1'):\n nrec = 8\n elif _WCSLIB_VER < Version('7.3'):\n nrec = 11\n else:\n nrec = 9\n\n w = wcs.WCS()\n header_string = w.to_header()\n wfits = w.to_fits()\n assert isinstance(wfits, fits.HDUList)\n assert isinstance(wfits[0], fits.PrimaryHDU)\n assert header_string == wfits[0].header[-nrec:]\n\n\ndef test_to_header_warning():\n fits_name = get_pkg_data_filename('data/sip.fits')\n with pytest.warns(wcs.FITSFixedWarning):\n x = wcs.WCS(fits_name)\n with pytest.warns(AstropyWarning, match='A_ORDER') as w:\n x.to_header()\n assert len(w) == 1\n\n\ndef test_no_comments_in_header():\n w = wcs.WCS()\n header = w.to_header()\n assert w.wcs.alt not in header\n assert 'COMMENT' + w.wcs.alt.strip() not in header\n assert 'COMMENT' not in header\n wkey = 'P'\n header = w.to_header(key=wkey)\n 
assert wkey not in header\n assert 'COMMENT' not in header\n assert 'COMMENT' + w.wcs.alt.strip() not in header\n\n\ndef test_find_all_wcs_crash():\n \"\"\"\n Causes a double free without a recent fix in wcslib_wrap.C\n \"\"\"\n with open(get_pkg_data_filename(\"data/too_many_pv.hdr\")) as fd:\n header = fd.read()\n # We have to set fix=False here, because one of the fixing tasks is to\n # remove redundant SCAMP distortion parameters when SIP distortion\n # parameters are also present.\n with pytest.raises(wcs.InvalidTransformError), pytest.warns(wcs.FITSFixedWarning):\n wcs.find_all_wcs(header, fix=False)\n\n\n# NOTE: Warning bubbles up from C layer during wcs.validate() and\n# is hard to catch, so we just ignore it.\n@pytest.mark.filterwarnings(\"ignore\")\ndef test_validate():\n results = wcs.validate(get_pkg_data_filename(\"data/validate.fits\"))\n results_txt = sorted({x.strip() for x in repr(results).splitlines()})\n if _WCSLIB_VER >= Version('7.6'):\n filename = 'data/validate.7.6.txt'\n elif _WCSLIB_VER >= Version('7.4'):\n filename = 'data/validate.7.4.txt'\n elif _WCSLIB_VER >= Version('6.0'):\n filename = 'data/validate.6.txt'\n elif _WCSLIB_VER >= Version('5.13'):\n filename = 'data/validate.5.13.txt'\n elif _WCSLIB_VER >= Version('5.0'):\n filename = 'data/validate.5.0.txt'\n else:\n filename = 'data/validate.txt'\n with open(get_pkg_data_filename(filename)) as fd:\n lines = fd.readlines()\n assert sorted({x.strip() for x in lines}) == results_txt\n\n\ndef test_validate_with_2_wcses():\n # From Issue #2053\n with pytest.warns(AstropyUserWarning):\n results = wcs.validate(get_pkg_data_filename(\"data/2wcses.hdr\"))\n\n assert \"WCS key 'A':\" in str(results)\n\n\ndef test_crpix_maps_to_crval():\n twcs = wcs.WCS(naxis=2)\n twcs.wcs.crval = [251.29, 57.58]\n twcs.wcs.cdelt = [1, 1]\n twcs.wcs.crpix = [507, 507]\n twcs.wcs.pc = np.array([[7.7e-6, 3.3e-5], [3.7e-5, -6.8e-6]])\n twcs._naxis = [1014, 1014]\n twcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']\n 
a = np.array(\n [[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],\n [0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],\n [-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],\n [-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],\n [-2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]\n )\n b = np.array(\n [[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],\n [0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],\n [6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],\n [3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],\n [-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]\n )\n twcs.sip = wcs.Sip(a, b, None, None, twcs.wcs.crpix)\n twcs.wcs.set()\n pscale = np.sqrt(wcs.utils.proj_plane_pixel_area(twcs))\n\n # test that CRPIX maps to CRVAL:\n assert_allclose(\n twcs.wcs_pix2world(*twcs.wcs.crpix, 1), twcs.wcs.crval,\n rtol=0.0, atol=1e-6 * pscale\n )\n\n # test that CRPIX maps to CRVAL:\n assert_allclose(\n twcs.all_pix2world(*twcs.wcs.crpix, 1), twcs.wcs.crval,\n rtol=0.0, atol=1e-6 * pscale\n )\n\n\ndef test_all_world2pix(fname=None, ext=0,\n tolerance=1.0e-4, origin=0,\n random_npts=25000,\n adaptive=False, maxiter=20,\n detect_divergence=True):\n \"\"\"Test all_world2pix, iterative inverse of all_pix2world\"\"\"\n\n # Open test FITS file:\n if fname is None:\n fname = get_pkg_data_filename('data/j94f05bgq_flt.fits')\n ext = ('SCI', 1)\n if not os.path.isfile(fname):\n raise OSError(f\"Input file '{fname:s}' to 'test_all_world2pix' not found.\")\n h = fits.open(fname)\n w = wcs.WCS(h[ext].header, h)\n h.close()\n del h\n\n crpix = w.wcs.crpix\n ncoord = crpix.shape[0]\n\n # Assume that CRPIX is at the center of the image and that the image has\n # a power-of-2 number of pixels along each axis. Only use the central\n # 1/64 for this testing purpose:\n naxesi_l = list((7. / 16 * crpix).astype(int))\n naxesi_u = list((9. 
/ 16 * crpix).astype(int))\n\n # Generate integer indices of pixels (image grid):\n img_pix = np.dstack([i.flatten() for i in\n np.meshgrid(*map(range, naxesi_l, naxesi_u))])[0]\n\n # Generage random data (in image coordinates):\n with NumpyRNGContext(123456789):\n rnd_pix = np.random.rand(random_npts, ncoord)\n\n # Scale random data to cover the central part of the image\n mwidth = 2 * (crpix * 1. / 8)\n rnd_pix = crpix - 0.5 * mwidth + (mwidth - 1) * rnd_pix\n\n # Reference pixel coordinates in image coordinate system (CS):\n test_pix = np.append(img_pix, rnd_pix, axis=0)\n # Reference pixel coordinates in sky CS using forward transformation:\n all_world = w.all_pix2world(test_pix, origin)\n\n try:\n runtime_begin = datetime.now()\n # Apply the inverse iterative process to pixels in world coordinates\n # to recover the pixel coordinates in image space.\n all_pix = w.all_world2pix(\n all_world, origin, tolerance=tolerance, adaptive=adaptive,\n maxiter=maxiter, detect_divergence=detect_divergence)\n runtime_end = datetime.now()\n except wcs.wcs.NoConvergence as e:\n runtime_end = datetime.now()\n ndiv = 0\n if e.divergent is not None:\n ndiv = e.divergent.shape[0]\n print(f\"There are {ndiv} diverging solutions.\")\n print(f\"Indices of diverging solutions:\\n{e.divergent}\")\n print(f\"Diverging solutions:\\n{e.best_solution[e.divergent]}\\n\")\n print(\"Mean radius of the diverging solutions: {}\"\n .format(np.mean(\n np.linalg.norm(e.best_solution[e.divergent], axis=1))))\n print(\"Mean accuracy of the diverging solutions: {}\\n\"\n .format(np.mean(\n np.linalg.norm(e.accuracy[e.divergent], axis=1))))\n else:\n print(\"There are no diverging solutions.\")\n\n nslow = 0\n if e.slow_conv is not None:\n nslow = e.slow_conv.shape[0]\n print(f\"There are {nslow} slowly converging solutions.\")\n print(f\"Indices of slowly converging solutions:\\n{e.slow_conv}\")\n print(f\"Slowly converging solutions:\\n{e.best_solution[e.slow_conv]}\\n\")\n else:\n print(\"There are 
no slowly converging solutions.\\n\")\n\n print(\"There are {} converged solutions.\"\n .format(e.best_solution.shape[0] - ndiv - nslow))\n print(f\"Best solutions (all points):\\n{e.best_solution}\")\n print(f\"Accuracy:\\n{e.accuracy}\\n\")\n print(\"\\nFinished running 'test_all_world2pix' with errors.\\n\"\n \"ERROR: {}\\nRun time: {}\\n\"\n .format(e.args[0], runtime_end - runtime_begin))\n raise e\n\n # Compute differences between reference pixel coordinates and\n # pixel coordinates (in image space) recovered from reference\n # pixels in world coordinates:\n errors = np.sqrt(np.sum(np.power(all_pix - test_pix, 2), axis=1))\n meanerr = np.mean(errors)\n maxerr = np.amax(errors)\n print(\"\\nFinished running 'test_all_world2pix'.\\n\"\n \"Mean error = {:e} (Max error = {:e})\\n\"\n \"Run time: {}\\n\"\n .format(meanerr, maxerr, runtime_end - runtime_begin))\n\n assert(maxerr < 2.0 * tolerance)\n\n\ndef test_scamp_sip_distortion_parameters():\n \"\"\"\n Test parsing of WCS parameters with redundant SIP and SCAMP distortion\n parameters.\n \"\"\"\n header = get_pkg_data_contents('data/validate.fits', encoding='binary')\n with pytest.warns(wcs.FITSFixedWarning):\n w = wcs.WCS(header)\n # Just check that this doesn't raise an exception.\n w.all_pix2world(0, 0, 0)\n\n\ndef test_fixes2():\n \"\"\"\n From github issue #1854\n \"\"\"\n header = get_pkg_data_contents(\n 'data/nonstandard_units.hdr', encoding='binary')\n with pytest.raises(wcs.InvalidTransformError):\n wcs.WCS(header, fix=False)\n\n\ndef test_unit_normalization():\n \"\"\"\n From github issue #1918\n \"\"\"\n header = get_pkg_data_contents(\n 'data/unit.hdr', encoding='binary')\n w = wcs.WCS(header)\n assert w.wcs.cunit[2] == 'm/s'\n\n\ndef test_footprint_to_file(tmpdir):\n \"\"\"\n From github issue #1912\n \"\"\"\n # Arbitrary keywords from real data\n hdr = {'CTYPE1': 'RA---ZPN', 'CRUNIT1': 'deg',\n 'CRPIX1': -3.3495999e+02, 'CRVAL1': 3.185790700000e+02,\n 'CTYPE2': 'DEC--ZPN', 'CRUNIT2': 'deg',\n 
'CRPIX2': 3.0453999e+03, 'CRVAL2': 4.388538000000e+01,\n 'PV2_1': 1., 'PV2_3': 220., 'NAXIS1': 2048, 'NAXIS2': 1024}\n w = wcs.WCS(hdr)\n\n testfile = str(tmpdir.join('test.txt'))\n w.footprint_to_file(testfile)\n\n with open(testfile) as f:\n lines = f.readlines()\n\n assert len(lines) == 4\n assert lines[2] == 'ICRS\\n'\n assert 'color=green' in lines[3]\n\n w.footprint_to_file(testfile, coordsys='FK5', color='red')\n\n with open(testfile) as f:\n lines = f.readlines()\n\n assert len(lines) == 4\n assert lines[2] == 'FK5\\n'\n assert 'color=red' in lines[3]\n\n with pytest.raises(ValueError):\n w.footprint_to_file(testfile, coordsys='FOO')\n\n del hdr['NAXIS1']\n del hdr['NAXIS2']\n w = wcs.WCS(hdr)\n with pytest.warns(AstropyUserWarning):\n w.footprint_to_file(testfile)\n\n\n# Ignore FITSFixedWarning about keyrecords following the END keyrecord were\n# ignored, which comes from src/astropy_wcs.c . Only a blind catch like this\n# seems to work when pytest warnings are turned into exceptions.\n@pytest.mark.filterwarnings('ignore')\ndef test_validate_faulty_wcs():\n \"\"\"\n From github issue #2053\n \"\"\"\n h = fits.Header()\n # Illegal WCS:\n h['RADESYSA'] = 'ICRS'\n h['PV2_1'] = 1.0\n hdu = fits.PrimaryHDU([[0]], header=h)\n hdulist = fits.HDUList([hdu])\n # Check that this doesn't raise a NameError exception\n wcs.validate(hdulist)\n\n\ndef test_error_message():\n header = get_pkg_data_contents(\n 'data/invalid_header.hdr', encoding='binary')\n\n with pytest.raises(wcs.InvalidTransformError):\n # Both lines are in here, because 0.4 calls .set within WCS.__init__,\n # whereas 0.3 and earlier did not.\n with pytest.warns(wcs.FITSFixedWarning):\n w = wcs.WCS(header, _do_set=False)\n w.all_pix2world([[536.0, 894.0]], 0)\n\n\ndef test_out_of_bounds():\n # See #2107\n header = get_pkg_data_contents('data/zpn-hole.hdr', encoding='binary')\n w = wcs.WCS(header)\n\n ra, dec = w.wcs_pix2world(110, 110, 0)\n\n assert np.isnan(ra)\n assert np.isnan(dec)\n\n ra, dec = 
w.wcs_pix2world(0, 0, 0)\n\n assert not np.isnan(ra)\n assert not np.isnan(dec)\n\n\ndef test_calc_footprint_1():\n fits = get_pkg_data_filename('data/sip.fits')\n with pytest.warns(wcs.FITSFixedWarning):\n w = wcs.WCS(fits)\n\n axes = (1000, 1051)\n ref = np.array([[202.39314493, 47.17753352],\n [202.71885939, 46.94630488],\n [202.94631893, 47.15855022],\n [202.72053428, 47.37893142]])\n footprint = w.calc_footprint(axes=axes)\n assert_allclose(footprint, ref)\n\n\ndef test_calc_footprint_2():\n \"\"\" Test calc_footprint without distortion. \"\"\"\n fits = get_pkg_data_filename('data/sip.fits')\n with pytest.warns(wcs.FITSFixedWarning):\n w = wcs.WCS(fits)\n\n axes = (1000, 1051)\n ref = np.array([[202.39265216, 47.17756518],\n [202.7469062, 46.91483312],\n [203.11487481, 47.14359319],\n [202.76092671, 47.40745948]])\n footprint = w.calc_footprint(axes=axes, undistort=False)\n assert_allclose(footprint, ref)\n\n\ndef test_calc_footprint_3():\n \"\"\" Test calc_footprint with corner of the pixel.\"\"\"\n w = wcs.WCS()\n w.wcs.ctype = [\"GLON-CAR\", \"GLAT-CAR\"]\n w.wcs.crpix = [1.5, 5.5]\n w.wcs.cdelt = [-0.1, 0.1]\n axes = (2, 10)\n ref = np.array([[0.1, -0.5],\n [0.1, 0.5],\n [359.9, 0.5],\n [359.9, -0.5]])\n\n footprint = w.calc_footprint(axes=axes, undistort=False, center=False)\n assert_allclose(footprint, ref)\n\n\ndef test_sip():\n # See #2107\n header = get_pkg_data_contents('data/irac_sip.hdr', encoding='binary')\n w = wcs.WCS(header)\n\n x0, y0 = w.sip_pix2foc(200, 200, 0)\n\n assert_allclose(72, x0, 1e-3)\n assert_allclose(72, y0, 1e-3)\n\n x1, y1 = w.sip_foc2pix(x0, y0, 0)\n\n assert_allclose(200, x1, 1e-3)\n assert_allclose(200, y1, 1e-3)\n\n\ndef test_sub_3d_with_sip():\n # See #10527\n header = get_pkg_data_contents('data/irac_sip.hdr', encoding='binary')\n header = fits.Header.fromstring(header)\n header['NAXIS'] = 3\n header.set('NAXIS3', 64, after=header.index('NAXIS2'))\n w = wcs.WCS(header, naxis=2)\n assert w.naxis == 2\n\n\ndef 
test_printwcs(capsys):\n \"\"\"\n Just make sure that it runs\n \"\"\"\n h = get_pkg_data_contents(\n 'data/spectra/orion-freq-1.hdr', encoding='binary')\n with pytest.warns(wcs.FITSFixedWarning):\n w = wcs.WCS(h)\n w.printwcs()\n captured = capsys.readouterr()\n assert 'WCS Keywords' in captured.out\n h = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')\n w = wcs.WCS(h)\n w.printwcs()\n captured = capsys.readouterr()\n assert 'WCS Keywords' in captured.out\n\n\ndef test_invalid_spherical():\n header = \"\"\"\nSIMPLE = T / conforms to FITS standard\nBITPIX = 8 / array data type\nWCSAXES = 2 / no comment\nCTYPE1 = 'RA---TAN' / TAN (gnomic) projection\nCTYPE2 = 'DEC--TAN' / TAN (gnomic) projection\nEQUINOX = 2000.0 / Equatorial coordinates definition (yr)\nLONPOLE = 180.0 / no comment\nLATPOLE = 0.0 / no comment\nCRVAL1 = 16.0531567459 / RA of reference point\nCRVAL2 = 23.1148929108 / DEC of reference point\nCRPIX1 = 2129 / X reference pixel\nCRPIX2 = 1417 / Y reference pixel\nCUNIT1 = 'deg ' / X pixel scale units\nCUNIT2 = 'deg ' / Y pixel scale units\nCD1_1 = -0.00912247310646 / Transformation matrix\nCD1_2 = -0.00250608809647 / no comment\nCD2_1 = 0.00250608809647 / no comment\nCD2_2 = -0.00912247310646 / no comment\nIMAGEW = 4256 / Image width, in pixels.\nIMAGEH = 2832 / Image height, in pixels.\n \"\"\"\n\n f = io.StringIO(header)\n header = fits.Header.fromtextfile(f)\n\n w = wcs.WCS(header)\n x, y = w.wcs_world2pix(211, -26, 0)\n assert np.isnan(x) and np.isnan(y)\n\n\ndef test_no_iteration():\n\n # Regression test for #3066\n\n w = wcs.WCS(naxis=2)\n\n with pytest.raises(TypeError) as exc:\n iter(w)\n assert exc.value.args[0] == \"'WCS' object is not iterable\"\n\n class NewWCS(wcs.WCS):\n pass\n\n w = NewWCS(naxis=2)\n\n with pytest.raises(TypeError) as exc:\n iter(w)\n assert exc.value.args[0] == \"'NewWCS' object is not iterable\"\n\n\n@pytest.mark.skipif('_wcs.__version__[0] < \"5\"',\n reason=\"TPV only works with wcslib 5.x or later\")\ndef 
test_sip_tpv_agreement():\n sip_header = get_pkg_data_contents(\n os.path.join(\"data\", \"siponly.hdr\"), encoding='binary')\n tpv_header = get_pkg_data_contents(\n os.path.join(\"data\", \"tpvonly.hdr\"), encoding='binary')\n\n with pytest.warns(wcs.FITSFixedWarning):\n w_sip = wcs.WCS(sip_header)\n w_tpv = wcs.WCS(tpv_header)\n\n assert_array_almost_equal(\n w_sip.all_pix2world([w_sip.wcs.crpix], 1),\n w_tpv.all_pix2world([w_tpv.wcs.crpix], 1))\n\n w_sip2 = wcs.WCS(w_sip.to_header())\n w_tpv2 = wcs.WCS(w_tpv.to_header())\n\n assert_array_almost_equal(\n w_sip.all_pix2world([w_sip.wcs.crpix], 1),\n w_sip2.all_pix2world([w_sip.wcs.crpix], 1))\n assert_array_almost_equal(\n w_tpv.all_pix2world([w_sip.wcs.crpix], 1),\n w_tpv2.all_pix2world([w_sip.wcs.crpix], 1))\n assert_array_almost_equal(\n w_sip2.all_pix2world([w_sip.wcs.crpix], 1),\n w_tpv2.all_pix2world([w_tpv.wcs.crpix], 1))\n\n\n@pytest.mark.skipif('_wcs.__version__[0] < \"5\"',\n reason=\"TPV only works with wcslib 5.x or later\")\ndef test_tpv_copy():\n # See #3904\n\n tpv_header = get_pkg_data_contents(\n os.path.join(\"data\", \"tpvonly.hdr\"), encoding='binary')\n\n with pytest.warns(wcs.FITSFixedWarning):\n w_tpv = wcs.WCS(tpv_header)\n\n ra, dec = w_tpv.wcs_pix2world([0, 100, 200], [0, -100, 200], 0)\n assert ra[0] != ra[1] and ra[1] != ra[2]\n assert dec[0] != dec[1] and dec[1] != dec[2]\n\n\ndef test_hst_wcs():\n path = get_pkg_data_filename(\"data/dist_lookup.fits.gz\")\n\n with fits.open(path) as hdulist:\n # wcslib will complain about the distortion parameters if they\n # weren't correctly deleted from the header\n w = wcs.WCS(hdulist[1].header, hdulist)\n\n # Check pixel scale and area\n assert_quantity_allclose(\n w.proj_plane_pixel_scales(), [1.38484378e-05, 1.39758488e-05] * u.deg)\n assert_quantity_allclose(\n w.proj_plane_pixel_area(), 1.93085492e-10 * (u.deg * u.deg))\n\n # Exercise the main transformation functions, mainly just for\n # coverage\n w.p4_pix2foc([0, 100, 200], [0, -100, 200], 
0)\n w.det2im([0, 100, 200], [0, -100, 200], 0)\n\n w.cpdis1 = w.cpdis1\n w.cpdis2 = w.cpdis2\n\n w.det2im1 = w.det2im1\n w.det2im2 = w.det2im2\n\n w.sip = w.sip\n\n w.cpdis1.cdelt = w.cpdis1.cdelt\n w.cpdis1.crpix = w.cpdis1.crpix\n w.cpdis1.crval = w.cpdis1.crval\n w.cpdis1.data = w.cpdis1.data\n\n assert w.sip.a_order == 4\n assert w.sip.b_order == 4\n assert w.sip.ap_order == 0\n assert w.sip.bp_order == 0\n assert_array_equal(w.sip.crpix, [2048., 1024.])\n wcs.WCS(hdulist[1].header, hdulist)\n\n\ndef test_cpdis_comments():\n path = get_pkg_data_filename(\"data/dist_lookup.fits.gz\")\n\n f = fits.open(path)\n w = wcs.WCS(f[1].header, f)\n hdr = w.to_fits()[0].header\n f.close()\n\n wcscards = list(hdr['CPDIS*'].cards) + list(hdr['DP*'].cards)\n wcsdict = {k: (v, c) for k, v, c in wcscards}\n\n refcards = [\n ('CPDIS1', 'LOOKUP', 'Prior distortion function type'),\n ('DP1.EXTVER', 1.0, 'Version number of WCSDVARR extension'),\n ('DP1.NAXES', 2.0, 'Number of independent variables in CPDIS function'),\n ('DP1.AXIS.1', 1.0, 'Axis number of the 1st variable in a CPDIS function'),\n ('DP1.AXIS.2', 2.0, 'Axis number of the 2nd variable in a CPDIS function'),\n ('CPDIS2', 'LOOKUP', 'Prior distortion function type'),\n ('DP2.EXTVER', 2.0, 'Version number of WCSDVARR extension'),\n ('DP2.NAXES', 2.0, 'Number of independent variables in CPDIS function'),\n ('DP2.AXIS.1', 1.0, 'Axis number of the 1st variable in a CPDIS function'),\n ('DP2.AXIS.2', 2.0, 'Axis number of the 2nd variable in a CPDIS function'),\n ]\n\n assert len(wcsdict) == len(refcards)\n\n for k, v, c in refcards:\n assert wcsdict[k] == (v, c)\n\n\ndef test_d2im_comments():\n path = get_pkg_data_filename(\"data/ie6d07ujq_wcs.fits\")\n\n f = fits.open(path)\n with pytest.warns(wcs.FITSFixedWarning):\n w = wcs.WCS(f[0].header, f)\n f.close()\n wcscards = list(w.to_fits()[0].header['D2IM*'].cards)\n wcsdict = {k: (v, c) for k, v, c in wcscards}\n\n refcards = [\n ('D2IMDIS1', 'LOOKUP', 'Detector to image 
correction type'),\n ('D2IM1.EXTVER', 1.0, 'Version number of WCSDVARR extension'),\n ('D2IM1.NAXES', 2.0, 'Number of independent variables in D2IM function'),\n ('D2IM1.AXIS.1', 1.0, 'Axis number of the 1st variable in a D2IM function'),\n ('D2IM1.AXIS.2', 2.0, 'Axis number of the 2nd variable in a D2IM function'),\n ('D2IMDIS2', 'LOOKUP', 'Detector to image correction type'),\n ('D2IM2.EXTVER', 2.0, 'Version number of WCSDVARR extension'),\n ('D2IM2.NAXES', 2.0, 'Number of independent variables in D2IM function'),\n ('D2IM2.AXIS.1', 1.0, 'Axis number of the 1st variable in a D2IM function'),\n ('D2IM2.AXIS.2', 2.0, 'Axis number of the 2nd variable in a D2IM function'),\n # ('D2IMERR1', 0.049, 'Maximum error of D2IM correction for axis 1'),\n # ('D2IMERR2', 0.035, 'Maximum error of D2IM correction for axis 2'),\n # ('D2IMEXT', 'iref$y7b1516hi_d2i.fits', ''),\n ]\n\n assert len(wcsdict) == len(refcards)\n\n for k, v, c in refcards:\n assert wcsdict[k] == (v, c)\n\n\ndef test_sip_broken():\n # This header caused wcslib to segfault because it has a SIP\n # specification in a non-default keyword\n hdr = get_pkg_data_contents(\"data/sip-broken.hdr\")\n\n wcs.WCS(hdr)\n\n\ndef test_no_truncate_crval():\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/4612\n \"\"\"\n w = wcs.WCS(naxis=3)\n w.wcs.crval = [50, 50, 2.12345678e11]\n w.wcs.cdelt = [1e-3, 1e-3, 1e8]\n w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']\n w.wcs.set()\n\n header = w.to_header()\n for ii in range(3):\n assert header[f'CRVAL{ii + 1}'] == w.wcs.crval[ii]\n assert header[f'CDELT{ii + 1}'] == w.wcs.cdelt[ii]\n\n\ndef test_no_truncate_crval_try2():\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/4612\n \"\"\"\n w = wcs.WCS(naxis=3)\n w.wcs.crval = [50, 50, 2.12345678e11]\n w.wcs.cdelt = [1e-5, 1e-5, 1e5]\n w.wcs.ctype = ['RA---SIN', 'DEC--SIN', 'FREQ']\n w.wcs.cunit = ['deg', 'deg', 'Hz']\n w.wcs.crpix = [1, 1, 1]\n w.wcs.restfrq = 2.34e11\n w.wcs.set()\n\n 
header = w.to_header()\n for ii in range(3):\n assert header[f'CRVAL{ii + 1}'] == w.wcs.crval[ii]\n assert header[f'CDELT{ii + 1}'] == w.wcs.cdelt[ii]\n\n\ndef test_no_truncate_crval_p17():\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/5162\n \"\"\"\n w = wcs.WCS(naxis=2)\n w.wcs.crval = [50.1234567890123456, 50.1234567890123456]\n w.wcs.cdelt = [1e-3, 1e-3]\n w.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n w.wcs.set()\n\n header = w.to_header()\n assert header['CRVAL1'] != w.wcs.crval[0]\n assert header['CRVAL2'] != w.wcs.crval[1]\n header = w.to_header(relax=wcs.WCSHDO_P17)\n assert header['CRVAL1'] == w.wcs.crval[0]\n assert header['CRVAL2'] == w.wcs.crval[1]\n\n\ndef test_no_truncate_using_compare():\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/4612\n\n This one uses WCS.wcs.compare and some slightly different values\n \"\"\"\n w = wcs.WCS(naxis=3)\n w.wcs.crval = [2.409303333333E+02, 50, 2.12345678e11]\n w.wcs.cdelt = [1e-3, 1e-3, 1e8]\n w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']\n w.wcs.set()\n w2 = wcs.WCS(w.to_header())\n w.wcs.compare(w2.wcs)\n\n\ndef test_passing_ImageHDU():\n \"\"\"\n Passing ImageHDU or PrimaryHDU and comparing it with\n wcs initialized from header. 
For #4493.\n \"\"\"\n path = get_pkg_data_filename('data/validate.fits')\n with fits.open(path) as hdulist:\n with pytest.warns(wcs.FITSFixedWarning):\n wcs_hdu = wcs.WCS(hdulist[0])\n wcs_header = wcs.WCS(hdulist[0].header)\n assert wcs_hdu.wcs.compare(wcs_header.wcs)\n wcs_hdu = wcs.WCS(hdulist[1])\n wcs_header = wcs.WCS(hdulist[1].header)\n assert wcs_hdu.wcs.compare(wcs_header.wcs)\n\n\ndef test_inconsistent_sip():\n \"\"\"\n Test for #4814\n \"\"\"\n hdr = get_pkg_data_contents(\"data/sip-broken.hdr\")\n ctx = ctx_for_v71_dateref_warnings()\n with ctx:\n w = wcs.WCS(hdr)\n with pytest.warns(AstropyWarning):\n newhdr = w.to_header(relax=None)\n # CTYPE should not include \"-SIP\" if relax is None\n with ctx:\n wnew = wcs.WCS(newhdr)\n assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)\n newhdr = w.to_header(relax=False)\n assert 'A_0_2' not in newhdr\n # CTYPE should not include \"-SIP\" if relax is False\n with ctx:\n wnew = wcs.WCS(newhdr)\n assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)\n with pytest.warns(AstropyWarning):\n newhdr = w.to_header(key=\"C\")\n assert 'A_0_2' not in newhdr\n # Test writing header with a different key\n with ctx:\n wnew = wcs.WCS(newhdr, key='C')\n assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)\n with pytest.warns(AstropyWarning):\n newhdr = w.to_header(key=\" \")\n # Test writing a primary WCS to header\n with ctx:\n wnew = wcs.WCS(newhdr)\n assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)\n # Test that \"-SIP\" is kept into CTYPE if relax=True and\n # \"-SIP\" was in the original header\n newhdr = w.to_header(relax=True)\n with ctx:\n wnew = wcs.WCS(newhdr)\n assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)\n assert 'A_0_2' in newhdr\n # Test that SIP coefficients are also written out.\n assert wnew.sip is not None\n # ######### broken header ###########\n # Test that \"-SIP\" is added to CTYPE if relax=True and\n # \"-SIP\" was not in the original 
header but SIP coefficients\n # are present.\n with ctx:\n w = wcs.WCS(hdr)\n w.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n newhdr = w.to_header(relax=True)\n with ctx:\n wnew = wcs.WCS(newhdr)\n assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)\n\n\ndef test_bounds_check():\n \"\"\"Test for #4957\"\"\"\n w = wcs.WCS(naxis=2)\n w.wcs.ctype = [\"RA---CAR\", \"DEC--CAR\"]\n w.wcs.cdelt = [10, 10]\n w.wcs.crval = [-90, 90]\n w.wcs.crpix = [1, 1]\n w.wcs.bounds_check(False, False)\n ra, dec = w.wcs_pix2world(300, 0, 0)\n assert_allclose(ra, -180)\n assert_allclose(dec, -30)\n\n\ndef test_naxis():\n w = wcs.WCS(naxis=2)\n w.wcs.crval = [1, 1]\n w.wcs.cdelt = [0.1, 0.1]\n w.wcs.crpix = [1, 1]\n w._naxis = [1000, 500]\n assert w.pixel_shape == (1000, 500)\n assert w.array_shape == (500, 1000)\n\n w.pixel_shape = (99, 59)\n assert w._naxis == [99, 59]\n\n w.array_shape = (45, 23)\n assert w._naxis == [23, 45]\n assert w.pixel_shape == (23, 45)\n\n w.pixel_shape = None\n assert w.pixel_bounds is None\n\n\ndef test_sip_with_altkey():\n \"\"\"\n Test that when creating a WCS object using a key, CTYPE with\n that key is looked at and not the primary CTYPE.\n fix for #5443.\n \"\"\"\n with fits.open(get_pkg_data_filename('data/sip.fits')) as f:\n with pytest.warns(wcs.FITSFixedWarning):\n w = wcs.WCS(f[0].header)\n # create a header with two WCSs.\n h1 = w.to_header(relax=True, key='A')\n h2 = w.to_header(relax=False)\n h1['CTYPE1A'] = \"RA---SIN-SIP\"\n h1['CTYPE2A'] = \"DEC--SIN-SIP\"\n h1.update(h2)\n with ctx_for_v71_dateref_warnings():\n w = wcs.WCS(h1, key='A')\n assert (w.wcs.ctype == np.array(['RA---SIN-SIP', 'DEC--SIN-SIP'])).all()\n\n\ndef test_to_fits_1():\n \"\"\"\n Test to_fits() with LookupTable distortion.\n \"\"\"\n fits_name = get_pkg_data_filename('data/dist.fits')\n with pytest.warns(AstropyDeprecationWarning):\n w = wcs.WCS(fits_name)\n wfits = w.to_fits()\n assert isinstance(wfits, fits.HDUList)\n assert isinstance(wfits[0], fits.PrimaryHDU)\n assert 
isinstance(wfits[1], fits.ImageHDU)\n\n\ndef test_keyedsip():\n \"\"\"\n Test sip reading with extra key.\n \"\"\"\n hdr_name = get_pkg_data_filename('data/sip-broken.hdr')\n header = fits.Header.fromfile(hdr_name)\n del header[\"CRPIX1\"]\n del header[\"CRPIX2\"]\n\n w = wcs.WCS(header=header, key=\"A\")\n assert isinstance(w.sip, wcs.Sip)\n assert w.sip.crpix[0] == 2048\n assert w.sip.crpix[1] == 1026\n\n\ndef test_zero_size_input():\n with fits.open(get_pkg_data_filename('data/sip.fits')) as f:\n with pytest.warns(wcs.FITSFixedWarning):\n w = wcs.WCS(f[0].header)\n\n inp = np.zeros((0, 2))\n assert_array_equal(inp, w.all_pix2world(inp, 0))\n assert_array_equal(inp, w.all_world2pix(inp, 0))\n\n inp = [], [1]\n result = w.all_pix2world([], [1], 0)\n assert_array_equal(inp[0], result[0])\n assert_array_equal(inp[1], result[1])\n\n result = w.all_world2pix([], [1], 0)\n assert_array_equal(inp[0], result[0])\n assert_array_equal(inp[1], result[1])\n\n\ndef test_scalar_inputs():\n \"\"\"\n Issue #7845\n \"\"\"\n wcsobj = wcs.WCS(naxis=1)\n result = wcsobj.all_pix2world(2, 1)\n assert_array_equal(result, [np.array(2.)])\n assert result[0].shape == ()\n\n result = wcsobj.all_pix2world([2], 1)\n assert_array_equal(result, [np.array([2.])])\n assert result[0].shape == (1,)\n\n\n# Ignore RuntimeWarning raised on s390.\n@pytest.mark.filterwarnings('ignore:.*invalid value encountered in.*')\ndef test_footprint_contains():\n \"\"\"\n Test WCS.footprint_contains(skycoord)\n \"\"\"\n\n header = \"\"\"\nWCSAXES = 2 / Number of coordinate axes\nCRPIX1 = 1045.0 / Pixel coordinate of reference point\nCRPIX2 = 1001.0 / Pixel coordinate of reference point\nPC1_1 = -0.00556448550786 / Coordinate transformation matrix element\nPC1_2 = -0.001042120133257 / Coordinate transformation matrix element\nPC2_1 = 0.001181477028705 / Coordinate transformation matrix element\nPC2_2 = -0.005590809742987 / Coordinate transformation matrix element\nCDELT1 = 1.0 / [deg] Coordinate increment at 
reference point\nCDELT2 = 1.0 / [deg] Coordinate increment at reference point\nCUNIT1 = 'deg' / Units of coordinate increment and value\nCUNIT2 = 'deg' / Units of coordinate increment and value\nCTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions\nCTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions\nCRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point\nCRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point\nLONPOLE = 180.0 / [deg] Native longitude of celestial pole\nLATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole\nRADESYS = 'ICRS' / Equatorial coordinate system\nMJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS\nDATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB\nNAXIS = 2 / NAXIS\nNAXIS1 = 2136 / length of first array dimension\nNAXIS2 = 2078 / length of second array dimension\n \"\"\" # noqa\n\n header = fits.Header.fromstring(header.strip(), '\\n')\n test_wcs = wcs.WCS(header)\n\n hasCoord = test_wcs.footprint_contains(SkyCoord(254, 2, unit='deg'))\n assert hasCoord\n\n hasCoord = test_wcs.footprint_contains(SkyCoord(240, 2, unit='deg'))\n assert not hasCoord\n\n hasCoord = test_wcs.footprint_contains(SkyCoord(24, 2, unit='deg'))\n assert not hasCoord\n\n\ndef test_cunit():\n # Initializing WCS\n w1 = wcs.WCS(naxis=2)\n w2 = wcs.WCS(naxis=2)\n w3 = wcs.WCS(naxis=2)\n w4 = wcs.WCS(naxis=2)\n # Initializing the values of cunit\n w1.wcs.cunit = ['deg', 'm/s']\n w2.wcs.cunit = ['km/h', 'km/h']\n w3.wcs.cunit = ['deg', 'm/s']\n w4.wcs.cunit = ['deg', 'deg']\n\n # Equality checking a cunit with itself\n assert w1.wcs.cunit == w1.wcs.cunit\n assert not w1.wcs.cunit != w1.wcs.cunit\n # Equality checking of two different cunit object having same values\n assert w1.wcs.cunit == w3.wcs.cunit\n assert not w1.wcs.cunit != w3.wcs.cunit\n # Equality checking of two different cunit object having the same first unit\n # but different second unit (see 
#9154)\n assert not w1.wcs.cunit == w4.wcs.cunit\n assert w1.wcs.cunit != w4.wcs.cunit\n # Inequality checking of two different cunit object having different values\n assert not w1.wcs.cunit == w2.wcs.cunit\n assert w1.wcs.cunit != w2.wcs.cunit\n # Inequality checking of cunit with a list of literals\n assert not w1.wcs.cunit == [1, 2, 3]\n assert w1.wcs.cunit != [1, 2, 3]\n # Inequality checking with some characters\n assert not w1.wcs.cunit == ['a', 'b', 'c']\n assert w1.wcs.cunit != ['a', 'b', 'c']\n # Comparison is not implemented TypeError will raise\n with pytest.raises(TypeError):\n w1.wcs.cunit < w2.wcs.cunit\n\n\nclass TestWcsWithTime:\n def setup(self):\n if _WCSLIB_VER >= Version('7.1'):\n fname = get_pkg_data_filename('data/header_with_time_wcslib71.fits')\n else:\n fname = get_pkg_data_filename('data/header_with_time.fits')\n self.header = fits.Header.fromfile(fname)\n with pytest.warns(wcs.FITSFixedWarning):\n self.w = wcs.WCS(self.header, key='A')\n\n def test_keywods2wcsprm(self):\n \"\"\" Make sure Wcsprm is populated correctly from the header.\"\"\"\n\n ctype = [self.header[val] for val in self.header[\"CTYPE*\"]]\n crval = [self.header[val] for val in self.header[\"CRVAL*\"]]\n crpix = [self.header[val] for val in self.header[\"CRPIX*\"]]\n cdelt = [self.header[val] for val in self.header[\"CDELT*\"]]\n cunit = [self.header[val] for val in self.header[\"CUNIT*\"]]\n assert list(self.w.wcs.ctype) == ctype\n time_axis_code = 4000 if _WCSLIB_VER >= Version('7.9') else 0\n assert list(self.w.wcs.axis_types) == [2200, 2201, 3300, time_axis_code]\n assert_allclose(self.w.wcs.crval, crval)\n assert_allclose(self.w.wcs.crpix, crpix)\n assert_allclose(self.w.wcs.cdelt, cdelt)\n assert list(self.w.wcs.cunit) == cunit\n\n naxis = self.w.naxis\n assert naxis == 4\n pc = np.zeros((naxis, naxis), dtype=np.float64)\n for i in range(1, 5):\n for j in range(1, 5):\n if i == j:\n pc[i-1, j-1] = self.header.get(f'PC{i}_{j}A', 1)\n else:\n pc[i-1, j-1] = 
self.header.get(f'PC{i}_{j}A', 0)\n assert_allclose(self.w.wcs.pc, pc)\n\n char_keys = ['timesys', 'trefpos', 'trefdir', 'plephem', 'timeunit',\n 'dateref', 'dateobs', 'datebeg', 'dateavg', 'dateend']\n for key in char_keys:\n assert getattr(self.w.wcs, key) == self.header.get(key, \"\")\n\n num_keys = ['mjdref', 'mjdobs', 'mjdbeg', 'mjdend',\n 'jepoch', 'bepoch', 'tstart', 'tstop', 'xposure',\n 'timsyer', 'timrder', 'timedel', 'timepixr',\n 'timeoffs', 'telapse', 'czphs', 'cperi']\n\n for key in num_keys:\n if key.upper() == 'MJDREF':\n hdrv = [self.header.get('MJDREFIA', np.nan),\n self.header.get('MJDREFFA', np.nan)]\n else:\n hdrv = self.header.get(key, np.nan)\n assert_allclose(getattr(self.w.wcs, key), hdrv)\n\n def test_transforms(self):\n assert_allclose(self.w.all_pix2world(*self.w.wcs.crpix, 1),\n self.w.wcs.crval)\n\n\ndef test_invalid_coordinate_masking():\n\n # Regression test for an issue which caused all coordinates to be set to NaN\n # after a transformation rather than just the invalid ones as reported by\n # WCSLIB. 
A specific example of this is that when considering an all-sky\n # spectral cube with a spectral axis that is not correlated with the sky\n # axes, if transforming pixel coordinates that did not fall 'in' the sky,\n # the spectral world value was also masked even though that coordinate\n # was valid.\n\n w = wcs.WCS(naxis=3)\n w.wcs.ctype = 'VELO_LSR', 'GLON-CAR', 'GLAT-CAR'\n w.wcs.crval = -20, 0, 0\n w.wcs.crpix = 1, 1441, 241\n w.wcs.cdelt = 1.3, -0.125, 0.125\n\n px = [-10, -10, 20]\n py = [-10, 10, 20]\n pz = [-10, 10, 20]\n\n wx, wy, wz = w.wcs_pix2world(px, py, pz, 0)\n\n # Before fixing this, wx used to return np.nan for the first element\n\n assert_allclose(wx, [-33, -33, 6])\n assert_allclose(wy, [np.nan, 178.75, 177.5])\n assert_allclose(wz, [np.nan, -28.75, -27.5])\n\n\ndef test_no_pixel_area():\n w = wcs.WCS(naxis=3)\n\n # Pixel area cannot be computed\n with pytest.raises(ValueError, match='Pixel area is defined only for 2D pixels'):\n w.proj_plane_pixel_area()\n\n # Pixel scales still possible\n assert_quantity_allclose(w.proj_plane_pixel_scales(), 1)\n\n\ndef test_distortion_header(tmpdir):\n \"\"\"\n Test that plate distortion model is correctly described by `wcs.to_header()`\n and preserved when creating a Cutout2D from the image, writing it to FITS,\n and reading it back from the file.\n \"\"\"\n path = get_pkg_data_filename(\"data/dss.14.29.56-62.41.05.fits.gz\")\n cen = np.array((50, 50))\n siz = np.array((20, 20))\n\n with fits.open(path) as hdulist:\n with pytest.warns(wcs.FITSFixedWarning):\n w = wcs.WCS(hdulist[0].header)\n cut = Cutout2D(hdulist[0].data, position=cen, size=siz, wcs=w)\n\n # This converts the DSS plate solution model with AMD[XY]n coefficients into a\n # Template Polynomial Distortion model (TPD.FWD.n coefficients);\n # not testing explicitly for the header keywords here.\n\n if _WCSLIB_VER < Version(\"7.4\"):\n with pytest.warns(AstropyWarning, match=\"WCS contains a TPD distortion model in CQDIS\"):\n w0 = 
wcs.WCS(w.to_header_string())\n with pytest.warns(AstropyWarning, match=\"WCS contains a TPD distortion model in CQDIS\"):\n w1 = wcs.WCS(cut.wcs.to_header_string())\n if _WCSLIB_VER >= Version(\"7.1\"):\n pytest.xfail(\"TPD coefficients incomplete with WCSLIB >= 7.1 < 7.4\")\n else:\n w0 = wcs.WCS(w.to_header_string())\n w1 = wcs.WCS(cut.wcs.to_header_string())\n\n assert w.pixel_to_world(0, 0).separation(w0.pixel_to_world(0, 0)) < 1.e-3 * u.mas\n assert w.pixel_to_world(*cen).separation(w0.pixel_to_world(*cen)) < 1.e-3 * u.mas\n\n assert w.pixel_to_world(*cen).separation(w1.pixel_to_world(*(siz / 2))) < 1.e-3 * u.mas\n\n cutfile = str(tmpdir.join('cutout.fits'))\n fits.writeto(cutfile, cut.data, cut.wcs.to_header())\n\n with fits.open(cutfile) as hdulist:\n w2 = wcs.WCS(hdulist[0].header)\n\n assert w.pixel_to_world(*cen).separation(w2.pixel_to_world(*(siz / 2))) < 1.e-3 * u.mas\n\n\ndef test_pixlist_wcs_colsel():\n \"\"\"\n Test selection of a specific pixel list WCS using ``colsel``. 
See #11412.\n \"\"\"\n hdr_file = get_pkg_data_filename('data/chandra-pixlist-wcs.hdr')\n hdr = fits.Header.fromtextfile(hdr_file)\n with pytest.warns(wcs.FITSFixedWarning):\n w = wcs.WCS(hdr, keysel=['image', 'pixel'], colsel=[11, 12])\n assert w.naxis == 2\n assert list(w.wcs.ctype) == ['RA---TAN', 'DEC--TAN']\n assert np.allclose(w.wcs.crval, [229.38051931869, -58.81108068885])\n assert np.allclose(w.wcs.pc, [[1, 0], [0, 1]])\n assert np.allclose(w.wcs.cdelt, [-0.00013666666666666, 0.00013666666666666])\n assert np.allclose(w.wcs.lonpole, 180.)\n\n\n@pytest.mark.skipif(\n _WCSLIB_VER < Version('7.8'),\n reason=\"TIME axis extraction only works with wcslib 7.8 or later\"\n)\ndef test_time_axis_selection():\n w = wcs.WCS(naxis=3)\n w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'TIME']\n w.wcs.set()\n assert list(w.sub([wcs.WCSSUB_TIME]).wcs.ctype) == ['TIME']\n assert (w.wcs_pix2world([[1, 2, 3]], 0)[0, 2] ==\n w.sub([wcs.WCSSUB_TIME]).wcs_pix2world([[3]], 0)[0, 0])\n\n\n@pytest.mark.skipif(\n _WCSLIB_VER < Version('7.8'),\n reason=\"TIME axis extraction only works with wcslib 7.8 or later\"\n)\ndef test_temporal():\n w = wcs.WCS(naxis=3)\n w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'TIME']\n w.wcs.set()\n assert w.has_temporal\n assert w.sub([wcs.WCSSUB_TIME]).is_temporal\n assert (w.wcs_pix2world([[1, 2, 3]], 0)[0, 2] ==\n w.temporal.wcs_pix2world([[3]], 0)[0, 0])\n\n\ndef test_swapaxes_same_val_roundtrip():\n w = wcs.WCS(naxis=3)\n w.wcs.ctype = [\"RA---TAN\", \"DEC--TAN\", \"FREQ\"]\n w.wcs.crpix = [32.5, 16.5, 1.]\n w.wcs.crval = [5.63, -72.05, 1.]\n w.wcs.pc = [[5.9e-06, 1.3e-05, 0.0], [-1.2e-05, 5.0e-06, 0.0], [0.0, 0.0, 1.0]]\n w.wcs.cdelt = [1.0, 1.0, 1.0]\n w.wcs.set()\n axes_order = [3, 2, 1]\n axes_order0 = list(i - 1 for i in axes_order)\n ws = w.sub(axes_order)\n imcoord = np.array([3, 5, 7])\n imcoords = imcoord[axes_order0]\n val_ref = w.wcs_pix2world([imcoord], 0)[0]\n val_swapped = ws.wcs_pix2world([imcoords], 0)[0]\n\n # check original axis and swapped 
give same results\n assert np.allclose(val_ref[axes_order0], val_swapped, rtol=0, atol=1e-8)\n\n # check round-tripping:\n assert np.allclose(w.wcs_world2pix([val_ref], 0)[0], imcoord, rtol=0, atol=1e-8)\n"}}},{"rowIdx":1388,"cells":{"hash":{"kind":"string","value":"3ddedd7ee830a2398eb1b755021a6d82f89931a5bec183f75b46f4b5764fb503"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n# Tests for the auxiliary parameters contained in wcsaux\n\nfrom numpy.testing import assert_allclose\n\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\n\n\nSTR_EXPECTED_EMPTY = \"\"\"\nrsun_ref:\ndsun_obs:\ncrln_obs:\nhgln_obs:\nhglt_obs:\"\"\".lstrip()\n\n\ndef test_empty():\n w = WCS(naxis=1)\n assert w.wcs.aux.rsun_ref is None\n assert w.wcs.aux.dsun_obs is None\n assert w.wcs.aux.crln_obs is None\n assert w.wcs.aux.hgln_obs is None\n assert w.wcs.aux.hglt_obs is None\n assert str(w.wcs.aux) == STR_EXPECTED_EMPTY\n\n\nHEADER_SOLAR = fits.Header.fromstring(\"\"\"\nWCSAXES = 2 / Number of coordinate axes\nCRPIX1 = 64.5 / Pixel coordinate of reference point\nCRPIX2 = 64.5 / Pixel coordinate of reference point\nPC1_1 = 0.99999994260024 / Coordinate transformation matrix element\nPC1_2 = -0.00033882076120692 / Coordinate transformation matrix element\nPC2_1 = 0.00033882076120692 / Coordinate transformation matrix element\nPC2_2 = 0.99999994260024 / Coordinate transformation matrix element\nCDELT1 = 0.0053287911111111 / [deg] Coordinate increment at reference point\nCDELT2 = 0.0053287911111111 / [deg] Coordinate increment at reference point\nCUNIT1 = 'deg' / Units of coordinate increment and value\nCUNIT2 = 'deg' / Units of coordinate increment and value\nCTYPE1 = 'HPLN-TAN' / Coordinate type codegnomonic projection\nCTYPE2 = 'HPLT-TAN' / Coordinate type codegnomonic projection\nCRVAL1 = -0.0012589367249586 / [deg] Coordinate value at reference point\nCRVAL2 = 0.00079599300143911 / [deg] Coordinate value at reference 
point\nLONPOLE = 180.0 / [deg] Native longitude of celestial pole\nLATPOLE = 0.00079599300143911 / [deg] Native latitude of celestial pole\nDATE-OBS= '2011-02-15T00:00:00.34' / ISO-8601 time of observation\nMJD-OBS = 55607.000003935 / [d] MJD at start of observation\nRSUN_REF= 696000000.0 / [m] Solar radius\nDSUN_OBS= 147724815128.0 / [m] Distance from centre of Sun to observer\nCRLN_OBS= 22.814522 / [deg] Carrington heliographic lng of observer\nCRLT_OBS= -6.820544 / [deg] Heliographic latitude of observer\nHGLN_OBS= 8.431123 / [deg] Stonyhurst heliographic lng of observer\nHGLT_OBS= -6.820544 / [deg] Heliographic latitude of observer\n\"\"\".lstrip(), sep='\\n')\n\n\nSTR_EXPECTED_GET = \"\"\"\nrsun_ref: 696000000.000000\ndsun_obs: 147724815128.000000\ncrln_obs: 22.814522\nhgln_obs: 8.431123\nhglt_obs: -6.820544\"\"\".lstrip()\n\n\ndef test_solar_aux_get():\n w = WCS(HEADER_SOLAR)\n assert_allclose(w.wcs.aux.rsun_ref, 696000000)\n assert_allclose(w.wcs.aux.dsun_obs, 147724815128)\n assert_allclose(w.wcs.aux.crln_obs, 22.814522)\n assert_allclose(w.wcs.aux.hgln_obs, 8.431123)\n assert_allclose(w.wcs.aux.hglt_obs, -6.820544)\n assert str(w.wcs.aux) == STR_EXPECTED_GET\n\n\nSTR_EXPECTED_SET = \"\"\"\nrsun_ref: 698000000.000000\ndsun_obs: 140000000000.000000\ncrln_obs: 10.000000\nhgln_obs: 30.000000\nhglt_obs: 40.000000\"\"\".lstrip()\n\n\ndef test_solar_aux_set():\n\n w = WCS(HEADER_SOLAR)\n\n w.wcs.aux.rsun_ref = 698000000\n assert_allclose(w.wcs.aux.rsun_ref, 698000000)\n\n w.wcs.aux.dsun_obs = 140000000000\n assert_allclose(w.wcs.aux.dsun_obs, 140000000000)\n\n w.wcs.aux.crln_obs = 10.\n assert_allclose(w.wcs.aux.crln_obs, 10.)\n\n w.wcs.aux.hgln_obs = 30.\n assert_allclose(w.wcs.aux.hgln_obs, 30.)\n\n w.wcs.aux.hglt_obs = 40.\n assert_allclose(w.wcs.aux.hglt_obs, 40.)\n\n assert str(w.wcs.aux) == STR_EXPECTED_SET\n\n header = w.to_header()\n assert_allclose(header['RSUN_REF'], 698000000)\n assert_allclose(header['DSUN_OBS'], 140000000000)\n 
assert_allclose(header['CRLN_OBS'], 10.)\n assert_allclose(header['HGLN_OBS'], 30.)\n assert_allclose(header['HGLT_OBS'], 40.)\n\n\ndef test_set_aux_on_empty():\n\n w = WCS(naxis=2)\n\n w.wcs.aux.rsun_ref = 698000000\n assert_allclose(w.wcs.aux.rsun_ref, 698000000)\n\n w.wcs.aux.dsun_obs = 140000000000\n assert_allclose(w.wcs.aux.dsun_obs, 140000000000)\n\n w.wcs.aux.crln_obs = 10.\n assert_allclose(w.wcs.aux.crln_obs, 10.)\n\n w.wcs.aux.hgln_obs = 30.\n assert_allclose(w.wcs.aux.hgln_obs, 30.)\n\n w.wcs.aux.hglt_obs = 40.\n assert_allclose(w.wcs.aux.hglt_obs, 40.)\n\n assert str(w.wcs.aux) == STR_EXPECTED_SET\n\n header = w.to_header()\n assert_allclose(header['RSUN_REF'], 698000000)\n assert_allclose(header['DSUN_OBS'], 140000000000)\n assert_allclose(header['CRLN_OBS'], 10.)\n assert_allclose(header['HGLN_OBS'], 30.)\n assert_allclose(header['HGLT_OBS'], 40.)\n\n\ndef test_unset_aux():\n w = WCS(HEADER_SOLAR)\n\n assert w.wcs.aux.rsun_ref is not None\n w.wcs.aux.rsun_ref = None\n assert w.wcs.aux.rsun_ref is None\n\n assert w.wcs.aux.dsun_obs is not None\n w.wcs.aux.dsun_obs = None\n assert w.wcs.aux.dsun_obs is None\n\n assert w.wcs.aux.crln_obs is not None\n w.wcs.aux.crln_obs = None\n assert w.wcs.aux.crln_obs is None\n\n assert w.wcs.aux.hgln_obs is not None\n w.wcs.aux.hgln_obs = None\n assert w.wcs.aux.hgln_obs is None\n\n assert w.wcs.aux.hglt_obs is not None\n w.wcs.aux.hglt_obs = None\n assert w.wcs.aux.hglt_obs is None\n\n assert str(w.wcs.aux) == 'rsun_ref:\\ndsun_obs:\\ncrln_obs:\\nhgln_obs:\\nhglt_obs:'\n\n header = w.to_header()\n assert 'RSUN_REF' not in header\n assert 'DSUN_OBS' not in header\n assert 'CRLN_OBS' not in header\n assert 'HGLN_OBS' not in header\n assert 'HGLT_OBS' not in header\n"}}},{"rowIdx":1389,"cells":{"hash":{"kind":"string","value":"a63d81c09307399eaef393af9f68ab392602b3b4366ee882866d9b0e046000af"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport gc\nimport 
locale\nimport re\n\nfrom packaging.version import Version\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\n\nfrom astropy.io import fits\nfrom astropy.wcs import wcs\nfrom astropy.wcs import _wcs\nfrom astropy.wcs.wcs import FITSFixedWarning\nfrom astropy.utils.data import (\n get_pkg_data_contents, get_pkg_data_fileobj, get_pkg_data_filename)\nfrom astropy.utils.misc import _set_locale\nfrom astropy import units as u\nfrom astropy.units.core import UnitsWarning\n\n######################################################################\n\n\ndef test_alt():\n w = _wcs.Wcsprm()\n assert w.alt == \" \"\n w.alt = \"X\"\n assert w.alt == \"X\"\n del w.alt\n assert w.alt == \" \"\n\n\ndef test_alt_invalid1():\n w = _wcs.Wcsprm()\n with pytest.raises(ValueError):\n w.alt = \"$\"\n\n\ndef test_alt_invalid2():\n w = _wcs.Wcsprm()\n with pytest.raises(ValueError):\n w.alt = \" \"\n\n\ndef test_axis_types():\n w = _wcs.Wcsprm()\n assert_array_equal(w.axis_types, [0, 0])\n\n\ndef test_cd():\n w = _wcs.Wcsprm()\n w.cd = [[1, 0], [0, 1]]\n assert w.cd.dtype == float\n assert w.has_cd() is True\n assert_array_equal(w.cd, [[1, 0], [0, 1]])\n del w.cd\n assert w.has_cd() is False\n\n\ndef test_cd_missing():\n w = _wcs.Wcsprm()\n assert w.has_cd() is False\n with pytest.raises(AttributeError):\n w.cd\n\n\ndef test_cd_missing2():\n w = _wcs.Wcsprm()\n w.cd = [[1, 0], [0, 1]]\n assert w.has_cd() is True\n del w.cd\n assert w.has_cd() is False\n with pytest.raises(AttributeError):\n w.cd\n\n\ndef test_cd_invalid():\n w = _wcs.Wcsprm()\n with pytest.raises(ValueError):\n w.cd = [1, 0, 0, 1]\n\n\ndef test_cdfix():\n w = _wcs.Wcsprm()\n w.cdfix()\n\n\ndef test_cdelt():\n w = _wcs.Wcsprm()\n assert_array_equal(w.cdelt, [1, 1])\n w.cdelt = [42, 54]\n assert_array_equal(w.cdelt, [42, 54])\n\n\ndef test_cdelt_delete():\n w = _wcs.Wcsprm()\n with pytest.raises(TypeError):\n del w.cdelt\n\n\ndef test_cel_offset():\n w = _wcs.Wcsprm()\n 
assert w.cel_offset is False\n w.cel_offset = 'foo'\n assert w.cel_offset is True\n w.cel_offset = 0\n assert w.cel_offset is False\n\n\ndef test_celfix():\n # TODO: We need some data with -NCP or -GLS projections to test\n # with. For now, this is just a smoke test\n w = _wcs.Wcsprm()\n assert w.celfix() == -1\n\n\ndef test_cname():\n w = _wcs.Wcsprm()\n # Test that this works as an iterator\n for x in w.cname:\n assert x == ''\n assert list(w.cname) == ['', '']\n w.cname = [b'foo', 'bar']\n assert list(w.cname) == ['foo', 'bar']\n\n\ndef test_cname_invalid():\n w = _wcs.Wcsprm()\n with pytest.raises(TypeError):\n w.cname = [42, 54]\n\n\ndef test_colax():\n w = _wcs.Wcsprm()\n assert w.colax.dtype == np.intc\n assert_array_equal(w.colax, [0, 0])\n w.colax = [42, 54]\n assert_array_equal(w.colax, [42, 54])\n w.colax[0] = 0\n assert_array_equal(w.colax, [0, 54])\n\n with pytest.raises(ValueError):\n w.colax = [1, 2, 3]\n\n\ndef test_colnum():\n w = _wcs.Wcsprm()\n assert w.colnum == 0\n w.colnum = 42\n assert w.colnum == 42\n\n with pytest.raises(OverflowError):\n w.colnum = 0xffffffffffffffffffff\n\n with pytest.raises(OverflowError):\n w.colnum = 0xffffffff\n\n with pytest.raises(TypeError):\n del w.colnum\n\n\ndef test_colnum_invalid():\n w = _wcs.Wcsprm()\n with pytest.raises(TypeError):\n w.colnum = 'foo'\n\n\ndef test_crder():\n w = _wcs.Wcsprm()\n assert w.crder.dtype == float\n assert np.all(np.isnan(w.crder))\n w.crder[0] = 0\n assert np.isnan(w.crder[1])\n assert w.crder[0] == 0\n w.crder = w.crder\n\n\ndef test_crota():\n w = _wcs.Wcsprm()\n w.crota = [1, 0]\n assert w.crota.dtype == float\n assert w.has_crota() is True\n assert_array_equal(w.crota, [1, 0])\n del w.crota\n assert w.has_crota() is False\n\n\ndef test_crota_missing():\n w = _wcs.Wcsprm()\n assert w.has_crota() is False\n with pytest.raises(AttributeError):\n w.crota\n\n\ndef test_crota_missing2():\n w = _wcs.Wcsprm()\n w.crota = [1, 0]\n assert w.has_crota() is True\n del w.crota\n assert 
w.has_crota() is False\n with pytest.raises(AttributeError):\n w.crota\n\n\ndef test_crpix():\n w = _wcs.Wcsprm()\n assert w.crpix.dtype == float\n assert_array_equal(w.crpix, [0, 0])\n w.crpix = [42, 54]\n assert_array_equal(w.crpix, [42, 54])\n w.crpix[0] = 0\n assert_array_equal(w.crpix, [0, 54])\n\n with pytest.raises(ValueError):\n w.crpix = [1, 2, 3]\n\n\ndef test_crval():\n w = _wcs.Wcsprm()\n assert w.crval.dtype == float\n assert_array_equal(w.crval, [0, 0])\n w.crval = [42, 54]\n assert_array_equal(w.crval, [42, 54])\n w.crval[0] = 0\n assert_array_equal(w.crval, [0, 54])\n\n\ndef test_csyer():\n w = _wcs.Wcsprm()\n assert w.csyer.dtype == float\n assert np.all(np.isnan(w.csyer))\n w.csyer[0] = 0\n assert np.isnan(w.csyer[1])\n assert w.csyer[0] == 0\n w.csyer = w.csyer\n\n\ndef test_ctype():\n w = _wcs.Wcsprm()\n assert list(w.ctype) == ['', '']\n w.ctype = [b'RA---TAN', 'DEC--TAN']\n assert_array_equal(w.axis_types, [2200, 2201])\n assert w.lat == 1\n assert w.lng == 0\n assert w.lattyp == 'DEC'\n assert w.lngtyp == 'RA'\n assert list(w.ctype) == ['RA---TAN', 'DEC--TAN']\n w.ctype = ['foo', 'bar']\n assert_array_equal(w.axis_types, [0, 0])\n assert list(w.ctype) == ['foo', 'bar']\n assert w.lat == -1\n assert w.lng == -1\n assert w.lattyp == 'DEC'\n assert w.lngtyp == 'RA'\n\n\ndef test_ctype_repr():\n w = _wcs.Wcsprm()\n assert list(w.ctype) == ['', '']\n w.ctype = [b'RA-\\t--TAN', 'DEC-\\n-TAN']\n assert repr(w.ctype == '[\"RA-\\t--TAN\", \"DEC-\\n-TAN\"]')\n\n\ndef test_ctype_index_error():\n w = _wcs.Wcsprm()\n assert list(w.ctype) == ['', '']\n for idx in (2, -3):\n with pytest.raises(IndexError):\n w.ctype[idx]\n with pytest.raises(IndexError):\n w.ctype[idx] = 'FOO'\n\n\ndef test_ctype_invalid_error():\n w = _wcs.Wcsprm()\n assert list(w.ctype) == ['', '']\n with pytest.raises(ValueError):\n w.ctype[0] = 'X' * 100\n with pytest.raises(TypeError):\n w.ctype[0] = True\n with pytest.raises(TypeError):\n w.ctype = ['a', 0]\n with 
pytest.raises(TypeError):\n w.ctype = None\n with pytest.raises(ValueError):\n w.ctype = ['a', 'b', 'c']\n with pytest.raises(ValueError):\n w.ctype = ['FOO', 'A' * 100]\n\n\ndef test_cubeface():\n w = _wcs.Wcsprm()\n assert w.cubeface == -1\n w.cubeface = 0\n with pytest.raises(OverflowError):\n w.cubeface = -1\n\n\ndef test_cunit():\n w = _wcs.Wcsprm()\n assert list(w.cunit) == [u.Unit(''), u.Unit('')]\n w.cunit = [u.m, 'km']\n assert w.cunit[0] == u.m\n assert w.cunit[1] == u.km\n\n\ndef test_cunit_invalid():\n w = _wcs.Wcsprm()\n with pytest.warns(u.UnitsWarning, match='foo') as warns:\n w.cunit[0] = 'foo'\n assert len(warns) == 1\n\n\ndef test_cunit_invalid2():\n w = _wcs.Wcsprm()\n with pytest.warns(u.UnitsWarning) as warns:\n w.cunit = ['foo', 'bar']\n assert len(warns) == 2\n assert 'foo' in str(warns[0].message)\n assert 'bar' in str(warns[1].message)\n\n\ndef test_unit():\n w = wcs.WCS()\n w.wcs.cunit[0] = u.erg\n assert w.wcs.cunit[0] == u.erg\n\n assert repr(w.wcs.cunit) == \"['erg', '']\"\n\n\ndef test_unit2():\n w = wcs.WCS()\n with pytest.warns(UnitsWarning):\n myunit = u.Unit(\"FOOBAR\", parse_strict=\"warn\")\n w.wcs.cunit[0] = myunit\n\n\ndef test_unit3():\n w = wcs.WCS()\n for idx in (2, -3):\n with pytest.raises(IndexError):\n w.wcs.cunit[idx]\n with pytest.raises(IndexError):\n w.wcs.cunit[idx] = u.m\n with pytest.raises(ValueError):\n w.wcs.cunit = [u.m, u.m, u.m]\n\n\ndef test_unitfix():\n w = _wcs.Wcsprm()\n w.unitfix()\n\n\ndef test_cylfix():\n # TODO: We need some data with broken cylindrical projections to\n # test with. 
For now, this is just a smoke test.\n w = _wcs.Wcsprm()\n assert w.cylfix() == -1\n\n assert w.cylfix([0, 1]) == -1\n\n with pytest.raises(ValueError):\n w.cylfix([0, 1, 2])\n\n\ndef test_dateavg():\n w = _wcs.Wcsprm()\n assert w.dateavg == ''\n # TODO: When dateavg is verified, check that it works\n\n\ndef test_dateobs():\n w = _wcs.Wcsprm()\n assert w.dateobs == ''\n # TODO: When dateavg is verified, check that it works\n\n\ndef test_datfix():\n w = _wcs.Wcsprm()\n w.dateobs = '31/12/99'\n assert w.datfix() == 0\n assert w.dateobs == '1999-12-31'\n assert w.mjdobs == 51543.0\n\n\ndef test_equinox():\n w = _wcs.Wcsprm()\n assert np.isnan(w.equinox)\n w.equinox = 0\n assert w.equinox == 0\n del w.equinox\n assert np.isnan(w.equinox)\n\n with pytest.raises(TypeError):\n w.equinox = None\n\n\ndef test_fix():\n w = _wcs.Wcsprm()\n fix_ref = {\n 'cdfix': 'No change',\n 'cylfix': 'No change',\n 'obsfix': 'No change',\n 'datfix': 'No change',\n 'spcfix': 'No change',\n 'unitfix': 'No change',\n 'celfix': 'No change',\n 'obsfix': 'No change'}\n\n version = wcs._wcs.__version__\n if Version(version) <= Version('5'):\n del fix_ref['obsfix']\n\n if Version(version) >= Version('7.1'):\n w.dateref = '1858-11-17'\n\n if Version('7.4') <= Version(version) < Version('7.6'):\n fix_ref['datfix'] = 'Success'\n\n assert w.fix() == fix_ref\n\n\ndef test_fix2():\n w = _wcs.Wcsprm()\n w.dateobs = '31/12/99'\n fix_ref = {\n 'cdfix': 'No change',\n 'cylfix': 'No change',\n 'obsfix': 'No change',\n 'datfix': \"Set MJD-OBS to 51543.000000 from DATE-OBS.\\nChanged DATE-OBS from '31/12/99' to '1999-12-31'\", # noqa\n 'spcfix': 'No change',\n 'unitfix': 'No change',\n 'celfix': 'No change'}\n version = wcs._wcs.__version__\n if Version(version) <= Version(\"5\"):\n del fix_ref['obsfix']\n fix_ref['datfix'] = \"Changed '31/12/99' to '1999-12-31'\"\n\n if Version(version) >= Version('7.3'):\n fix_ref['datfix'] = \"Set DATEREF to '1858-11-17' from MJDREF.\\n\" + fix_ref['datfix']\n\n elif 
Version(version) >= Version('7.1'):\n fix_ref['datfix'] = \"Set DATE-REF to '1858-11-17' from MJD-REF.\\n\" + fix_ref['datfix']\n\n assert w.fix() == fix_ref\n assert w.dateobs == '1999-12-31'\n assert w.mjdobs == 51543.0\n\n\ndef test_fix3():\n w = _wcs.Wcsprm()\n w.dateobs = '31/12/F9'\n fix_ref = {\n 'cdfix': 'No change',\n 'cylfix': 'No change',\n 'obsfix': 'No change',\n 'datfix': \"Invalid DATE-OBS format '31/12/F9'\",\n 'spcfix': 'No change',\n 'unitfix': 'No change',\n 'celfix': 'No change'\n }\n\n version = wcs._wcs.__version__\n if Version(version) <= Version(\"5\"):\n del fix_ref['obsfix']\n fix_ref['datfix'] = \"Invalid parameter value: invalid date '31/12/F9'\"\n\n if Version(version) >= Version('7.3'):\n fix_ref['datfix'] = \"Set DATEREF to '1858-11-17' from MJDREF.\\n\" + fix_ref['datfix']\n elif Version(version) >= Version('7.1'):\n fix_ref['datfix'] = \"Set DATE-REF to '1858-11-17' from MJD-REF.\\n\" + fix_ref['datfix']\n\n assert w.fix() == fix_ref\n assert w.dateobs == '31/12/F9'\n assert np.isnan(w.mjdobs)\n\n\ndef test_fix4():\n w = _wcs.Wcsprm()\n with pytest.raises(ValueError):\n w.fix('X')\n\n\ndef test_fix5():\n w = _wcs.Wcsprm()\n with pytest.raises(ValueError):\n w.fix(naxis=[0, 1, 2])\n\n\ndef test_get_ps():\n # TODO: We need some data with PSi_ma keywords\n w = _wcs.Wcsprm()\n assert len(w.get_ps()) == 0\n\n\ndef test_get_pv():\n # TODO: We need some data with PVi_ma keywords\n w = _wcs.Wcsprm()\n assert len(w.get_pv()) == 0\n\n\ndef test_imgpix_matrix():\n w = _wcs.Wcsprm()\n with pytest.raises(AssertionError):\n w.imgpix_matrix\n\n\ndef test_imgpix_matrix2():\n w = _wcs.Wcsprm()\n with pytest.raises(AttributeError):\n w.imgpix_matrix = None\n\n\ndef test_isunity():\n w = _wcs.Wcsprm()\n assert(w.is_unity())\n\n\ndef test_lat():\n w = _wcs.Wcsprm()\n assert w.lat == -1\n\n\ndef test_lat_set():\n w = _wcs.Wcsprm()\n with pytest.raises(AttributeError):\n w.lat = 0\n\n\ndef test_latpole():\n w = _wcs.Wcsprm()\n assert w.latpole == 90.0\n 
w.latpole = 45.0\n assert w.latpole == 45.0\n del w.latpole\n assert w.latpole == 90.0\n\n\ndef test_lattyp():\n w = _wcs.Wcsprm()\n assert w.lattyp == \" \"\n\n\ndef test_lattyp_set():\n w = _wcs.Wcsprm()\n with pytest.raises(AttributeError):\n w.lattyp = 0\n\n\ndef test_lng():\n w = _wcs.Wcsprm()\n assert w.lng == -1\n\n\ndef test_lng_set():\n w = _wcs.Wcsprm()\n with pytest.raises(AttributeError):\n w.lng = 0\n\n\ndef test_lngtyp():\n w = _wcs.Wcsprm()\n assert w.lngtyp == \" \"\n\n\ndef test_lngtyp_set():\n w = _wcs.Wcsprm()\n with pytest.raises(AttributeError):\n w.lngtyp = 0\n\n\ndef test_lonpole():\n w = _wcs.Wcsprm()\n assert np.isnan(w.lonpole)\n w.lonpole = 45.0\n assert w.lonpole == 45.0\n del w.lonpole\n assert np.isnan(w.lonpole)\n\n\ndef test_mix():\n w = _wcs.Wcsprm()\n w.ctype = [b'RA---TAN', 'DEC--TAN']\n with pytest.raises(_wcs.InvalidCoordinateError):\n w.mix(1, 1, [240, 480], 1, 5, [0, 2], [54, 32], 1)\n\n\ndef test_mjdavg():\n w = _wcs.Wcsprm()\n assert np.isnan(w.mjdavg)\n w.mjdavg = 45.0\n assert w.mjdavg == 45.0\n del w.mjdavg\n assert np.isnan(w.mjdavg)\n\n\ndef test_mjdobs():\n w = _wcs.Wcsprm()\n assert np.isnan(w.mjdobs)\n w.mjdobs = 45.0\n assert w.mjdobs == 45.0\n del w.mjdobs\n assert np.isnan(w.mjdobs)\n\n\ndef test_name():\n w = _wcs.Wcsprm()\n assert w.name == ''\n w.name = 'foo'\n assert w.name == 'foo'\n\n\ndef test_naxis():\n w = _wcs.Wcsprm()\n assert w.naxis == 2\n\n\ndef test_naxis_set():\n w = _wcs.Wcsprm()\n with pytest.raises(AttributeError):\n w.naxis = 4\n\n\ndef test_obsgeo():\n w = _wcs.Wcsprm()\n assert np.all(np.isnan(w.obsgeo))\n w.obsgeo = [1, 2, 3, 4, 5, 6]\n assert_array_equal(w.obsgeo, [1, 2, 3, 4, 5, 6])\n del w.obsgeo\n assert np.all(np.isnan(w.obsgeo))\n\n\ndef test_pc():\n w = _wcs.Wcsprm()\n assert w.has_pc()\n assert_array_equal(w.pc, [[1, 0], [0, 1]])\n w.cd = [[1, 0], [0, 1]]\n assert not w.has_pc()\n del w.cd\n assert w.has_pc()\n assert_array_equal(w.pc, [[1, 0], [0, 1]])\n w.pc = w.pc\n\n\ndef 
test_pc_missing():\n w = _wcs.Wcsprm()\n w.cd = [[1, 0], [0, 1]]\n assert not w.has_pc()\n with pytest.raises(AttributeError):\n w.pc\n\n\ndef test_phi0():\n w = _wcs.Wcsprm()\n assert np.isnan(w.phi0)\n w.phi0 = 42.0\n assert w.phi0 == 42.0\n del w.phi0\n assert np.isnan(w.phi0)\n\n\ndef test_piximg_matrix():\n w = _wcs.Wcsprm()\n with pytest.raises(AssertionError):\n w.piximg_matrix\n\n\ndef test_piximg_matrix2():\n w = _wcs.Wcsprm()\n with pytest.raises(AttributeError):\n w.piximg_matrix = None\n\n\ndef test_print_contents():\n # In general, this is human-consumable, so we don't care if the\n # content changes, just check the type\n w = _wcs.Wcsprm()\n assert isinstance(str(w), str)\n\n\ndef test_radesys():\n w = _wcs.Wcsprm()\n assert w.radesys == ''\n w.radesys = 'foo'\n assert w.radesys == 'foo'\n\n\ndef test_restfrq():\n w = _wcs.Wcsprm()\n assert w.restfrq == 0.0\n w.restfrq = np.nan\n assert np.isnan(w.restfrq)\n del w.restfrq\n\n\ndef test_restwav():\n w = _wcs.Wcsprm()\n assert w.restwav == 0.0\n w.restwav = np.nan\n assert np.isnan(w.restwav)\n del w.restwav\n\n\ndef test_set_ps():\n w = _wcs.Wcsprm()\n data = [(0, 0, \"param1\"), (1, 1, \"param2\")]\n w.set_ps(data)\n assert w.get_ps() == data\n\n\ndef test_set_ps_realloc():\n w = _wcs.Wcsprm()\n w.set_ps([(0, 0, \"param1\")] * 16)\n\n\ndef test_set_pv():\n w = _wcs.Wcsprm()\n data = [(0, 0, 42.), (1, 1, 54.)]\n w.set_pv(data)\n assert w.get_pv() == data\n\n\ndef test_set_pv_realloc():\n w = _wcs.Wcsprm()\n w.set_pv([(0, 0, 42.)] * 16)\n\n\ndef test_spcfix():\n # TODO: We need some data with broken spectral headers here to\n # really test\n header = get_pkg_data_contents(\n 'data/spectra/orion-velo-1.hdr', encoding='binary')\n w = _wcs.Wcsprm(header)\n assert w.spcfix() == -1\n\n\ndef test_spec():\n w = _wcs.Wcsprm()\n assert w.spec == -1\n\n\ndef test_spec_set():\n w = _wcs.Wcsprm()\n with pytest.raises(AttributeError):\n w.spec = 0\n\n\ndef test_specsys():\n w = _wcs.Wcsprm()\n assert w.specsys == 
''\n w.specsys = 'foo'\n assert w.specsys == 'foo'\n\n\ndef test_sptr():\n # TODO: Write me\n pass\n\n\ndef test_ssysobs():\n w = _wcs.Wcsprm()\n assert w.ssysobs == ''\n w.ssysobs = 'foo'\n assert w.ssysobs == 'foo'\n\n\ndef test_ssyssrc():\n w = _wcs.Wcsprm()\n assert w.ssyssrc == ''\n w.ssyssrc = 'foo'\n assert w.ssyssrc == 'foo'\n\n\ndef test_tab():\n w = _wcs.Wcsprm()\n assert len(w.tab) == 0\n # TODO: Inject some headers that have tables and test\n\n\ndef test_theta0():\n w = _wcs.Wcsprm()\n assert np.isnan(w.theta0)\n w.theta0 = 42.0\n assert w.theta0 == 42.0\n del w.theta0\n assert np.isnan(w.theta0)\n\n\ndef test_toheader():\n w = _wcs.Wcsprm()\n assert isinstance(w.to_header(), str)\n\n\ndef test_velangl():\n w = _wcs.Wcsprm()\n assert np.isnan(w.velangl)\n w.velangl = 42.0\n assert w.velangl == 42.0\n del w.velangl\n assert np.isnan(w.velangl)\n\n\ndef test_velosys():\n w = _wcs.Wcsprm()\n assert np.isnan(w.velosys)\n w.velosys = 42.0\n assert w.velosys == 42.0\n del w.velosys\n assert np.isnan(w.velosys)\n\n\ndef test_velref():\n w = _wcs.Wcsprm()\n assert w.velref == 0.0\n w.velref = 42\n assert w.velref == 42.0\n del w.velref\n assert w.velref == 0.0\n\n\ndef test_zsource():\n w = _wcs.Wcsprm()\n assert np.isnan(w.zsource)\n w.zsource = 42.0\n assert w.zsource == 42.0\n del w.zsource\n assert np.isnan(w.zsource)\n\n\ndef test_cd_3d():\n header = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')\n w = _wcs.Wcsprm(header)\n assert w.cd.shape == (3, 3)\n assert w.get_pc().shape == (3, 3)\n assert w.get_cdelt().shape == (3,)\n\n\ndef test_get_pc():\n header = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')\n w = _wcs.Wcsprm(header)\n pc = w.get_pc()\n try:\n pc[0, 0] = 42\n except (RuntimeError, ValueError):\n pass\n else:\n raise AssertionError()\n\n\ndef test_detailed_err():\n w = _wcs.Wcsprm()\n w.pc = [[0, 0], [0, 0]]\n with pytest.raises(_wcs.SingularMatrixError):\n w.set()\n\n\ndef test_header_parse():\n from astropy.io import 
fits\n with get_pkg_data_fileobj(\n 'data/header_newlines.fits', encoding='binary') as test_file:\n hdulist = fits.open(test_file)\n with pytest.warns(FITSFixedWarning):\n w = wcs.WCS(hdulist[0].header)\n assert w.wcs.ctype[0] == 'RA---TAN-SIP'\n\n\ndef test_locale():\n try:\n with _set_locale('fr_FR'):\n header = get_pkg_data_contents('data/locale.hdr',\n encoding='binary')\n with pytest.warns(FITSFixedWarning):\n w = _wcs.Wcsprm(header)\n assert re.search(\"[0-9]+,[0-9]*\", w.to_header()) is None\n except locale.Error:\n pytest.xfail(\n \"Can't set to 'fr_FR' locale, perhaps because it is not installed \"\n \"on this system\")\n\n\ndef test_unicode():\n w = _wcs.Wcsprm()\n with pytest.raises(UnicodeEncodeError):\n w.alt = \"‰\"\n\n\ndef test_sub_segfault():\n # Issue #1960\n header = fits.Header.fromtextfile(\n get_pkg_data_filename('data/sub-segfault.hdr'))\n w = wcs.WCS(header)\n w.sub([wcs.WCSSUB_CELESTIAL])\n gc.collect()\n\n\ndef test_bounds_check():\n w = _wcs.Wcsprm()\n w.bounds_check(False)\n\n\ndef test_wcs_sub_error_message():\n # Issue #1587\n w = _wcs.Wcsprm()\n with pytest.raises(TypeError) as e:\n w.sub('latitude')\n assert e.match(\"axes must None, a sequence or an integer$\")\n\n\ndef test_wcs_sub():\n # Issue #3356\n w = _wcs.Wcsprm()\n w.sub(['latitude'])\n\n w = _wcs.Wcsprm()\n w.sub([b'latitude'])\n\n\ndef test_compare():\n header = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')\n w = _wcs.Wcsprm(header)\n w2 = _wcs.Wcsprm(header)\n\n assert w == w2\n\n w.equinox = 42\n assert w == w2\n\n assert not w.compare(w2)\n assert w.compare(w2, _wcs.WCSCOMPARE_ANCILLARY)\n\n w = _wcs.Wcsprm(header)\n w2 = _wcs.Wcsprm(header)\n\n with pytest.warns(RuntimeWarning):\n w.cdelt[0] = np.float32(0.00416666666666666666666666)\n w2.cdelt[0] = np.float64(0.00416666666666666666666666)\n\n assert not w.compare(w2)\n assert w.compare(w2, tolerance=1e-6)\n\n\ndef test_radesys_defaults():\n w = _wcs.Wcsprm()\n w.ctype = ['RA---TAN', 'DEC--TAN']\n 
w.set()\n assert w.radesys == \"ICRS\"\n\n\ndef test_radesys_defaults_full():\n\n # As described in Section 3.1 of the FITS standard \"Equatorial and ecliptic\n # coordinates\", for those systems the RADESYS keyword can be used to\n # indicate the equatorial/ecliptic frame to use. From the standard:\n\n # \"For RADESYSa values of FK4 and FK4-NO-E, any stated equinox is Besselian\n # and, if neither EQUINOXa nor EPOCH are given, a default of 1950.0 is to\n # be taken. For FK5, any stated equinox is Julian and, if neither keyword\n # is given, it defaults to 2000.0.\n\n # \"If the EQUINOXa keyword is given it should always be accompanied by\n # RADESYS a. However, if it should happen to ap- pear by itself then\n # RADESYSa defaults to FK4 if EQUINOXa < 1984.0, or to FK5 if EQUINOXa\n # 1984.0. Note that these defaults, while probably true of older files\n # using the EPOCH keyword, are not required of them.\n\n # By default RADESYS is empty\n w = _wcs.Wcsprm(naxis=2)\n assert w.radesys == ''\n assert np.isnan(w.equinox)\n\n # For non-ecliptic or equatorial systems it is still empty\n w = _wcs.Wcsprm(naxis=2)\n for ctype in [('GLON-CAR', 'GLAT-CAR'),\n ('SLON-SIN', 'SLAT-SIN')]:\n w.ctype = ctype\n w.set()\n assert w.radesys == ''\n assert np.isnan(w.equinox)\n\n for ctype in [('RA---TAN', 'DEC--TAN'),\n ('ELON-TAN', 'ELAT-TAN'),\n ('DEC--TAN', 'RA---TAN'),\n ('ELAT-TAN', 'ELON-TAN')]:\n\n # Check defaults for RADESYS\n w = _wcs.Wcsprm(naxis=2)\n w.ctype = ctype\n w.set()\n assert w.radesys == 'ICRS'\n\n w = _wcs.Wcsprm(naxis=2)\n w.ctype = ctype\n w.equinox = 1980\n w.set()\n assert w.radesys == 'FK4'\n\n w = _wcs.Wcsprm(naxis=2)\n w.ctype = ctype\n w.equinox = 1984\n w.set()\n assert w.radesys == 'FK5'\n\n w = _wcs.Wcsprm(naxis=2)\n w.ctype = ctype\n w.radesys = 'foo'\n w.set()\n assert w.radesys == 'foo'\n\n # Check defaults for EQUINOX\n w = _wcs.Wcsprm(naxis=2)\n w.ctype = ctype\n w.set()\n assert np.isnan(w.equinox) # frame is ICRS, no equinox\n\n w = 
_wcs.Wcsprm(naxis=2)\n w.ctype = ctype\n w.radesys = 'ICRS'\n w.set()\n assert np.isnan(w.equinox)\n\n w = _wcs.Wcsprm(naxis=2)\n w.ctype = ctype\n w.radesys = 'FK5'\n w.set()\n assert w.equinox == 2000.\n\n w = _wcs.Wcsprm(naxis=2)\n w.ctype = ctype\n w.radesys = 'FK4'\n w.set()\n assert w.equinox == 1950\n\n w = _wcs.Wcsprm(naxis=2)\n w.ctype = ctype\n w.radesys = 'FK4-NO-E'\n w.set()\n assert w.equinox == 1950\n\n\ndef test_iteration():\n world = np.array(\n [[-0.58995335, -0.5],\n [0.00664326, -0.5],\n [-0.58995335, -0.25],\n [0.00664326, -0.25],\n [-0.58995335, 0.],\n [0.00664326, 0.],\n [-0.58995335, 0.25],\n [0.00664326, 0.25],\n [-0.58995335, 0.5],\n [0.00664326, 0.5]],\n float\n )\n\n w = wcs.WCS()\n w.wcs.ctype = ['GLON-CAR', 'GLAT-CAR']\n w.wcs.cdelt = [-0.006666666828, 0.006666666828]\n w.wcs.crpix = [75.907, 74.8485]\n x = w.wcs_world2pix(world, 1)\n\n expected = np.array(\n [[1.64400000e+02, -1.51498185e-01],\n [7.49105110e+01, -1.51498185e-01],\n [1.64400000e+02, 3.73485009e+01],\n [7.49105110e+01, 3.73485009e+01],\n [1.64400000e+02, 7.48485000e+01],\n [7.49105110e+01, 7.48485000e+01],\n [1.64400000e+02, 1.12348499e+02],\n [7.49105110e+01, 1.12348499e+02],\n [1.64400000e+02, 1.49848498e+02],\n [7.49105110e+01, 1.49848498e+02]],\n float)\n\n assert_array_almost_equal(x, expected)\n\n w2 = w.wcs_pix2world(x, 1)\n\n world[:, 0] %= 360.\n\n assert_array_almost_equal(w2, world)\n\n\ndef test_invalid_args():\n with pytest.raises(TypeError):\n _wcs.Wcsprm(keysel='A')\n\n with pytest.raises(ValueError):\n _wcs.Wcsprm(keysel=2)\n\n with pytest.raises(ValueError):\n _wcs.Wcsprm(colsel=2)\n\n with pytest.raises(ValueError):\n _wcs.Wcsprm(naxis=64)\n\n header = get_pkg_data_contents(\n 'data/spectra/orion-velo-1.hdr', encoding='binary')\n with pytest.raises(ValueError):\n _wcs.Wcsprm(header, relax='FOO')\n\n with pytest.raises(ValueError):\n _wcs.Wcsprm(header, naxis=3)\n\n with pytest.raises(KeyError):\n _wcs.Wcsprm(header, key='A')\n\n\n# Test keywords in the 
Time standard\n\n\ndef test_datebeg():\n w = _wcs.Wcsprm()\n assert w.datebeg == ''\n w.datebeg = '2001-02-11'\n assert w.datebeg == '2001-02-11'\n w.datebeg = '31/12/99'\n fix_ref = {\n 'cdfix': 'No change',\n 'cylfix': 'No change',\n 'obsfix': 'No change',\n 'datfix': \"Invalid DATE-BEG format '31/12/99'\",\n 'spcfix': 'No change',\n 'unitfix': 'No change',\n 'celfix': 'No change'}\n\n if Version(wcs._wcs.__version__) >= Version('7.3'):\n fix_ref['datfix'] = \"Set DATEREF to '1858-11-17' from MJDREF.\\n\" + fix_ref['datfix']\n elif Version(wcs._wcs.__version__) >= Version('7.1'):\n fix_ref['datfix'] = \"Set DATE-REF to '1858-11-17' from MJD-REF.\\n\" + fix_ref['datfix']\n\n assert w.fix() == fix_ref\n\n\nchar_keys = ['timesys', 'trefpos', 'trefdir', 'plephem', 'timeunit',\n 'dateref', 'dateavg', 'dateend']\n\n\n@pytest.mark.parametrize('key', char_keys)\ndef test_char_keys(key):\n w = _wcs.Wcsprm()\n assert getattr(w, key) == ''\n setattr(w, key, \"foo\")\n assert getattr(w, key) == 'foo'\n with pytest.raises(TypeError):\n setattr(w, key, 42)\n\n\nnum_keys = ['mjdobs', 'mjdbeg', 'mjdend', 'jepoch',\n 'bepoch', 'tstart', 'tstop', 'xposure', 'timsyer',\n 'timrder', 'timedel', 'timepixr', 'timeoffs',\n 'telapse', 'xposure']\n\n\n@pytest.mark.parametrize('key', num_keys)\ndef test_num_keys(key):\n w = _wcs.Wcsprm()\n assert np.isnan(getattr(w, key))\n setattr(w, key, 42.0)\n assert getattr(w, key) == 42.0\n delattr(w, key)\n assert np.isnan(getattr(w, key))\n with pytest.raises(TypeError):\n setattr(w, key, \"foo\")\n\n\n@pytest.mark.parametrize('key', ['czphs', 'cperi', 'mjdref'])\ndef test_array_keys(key):\n w = _wcs.Wcsprm()\n attr = getattr(w, key)\n if key == 'mjdref' and Version(_wcs.__version__) >= Version('7.1'):\n assert np.allclose(attr, [0, 0])\n else:\n assert np.all(np.isnan(attr))\n assert attr.dtype == float\n setattr(w, key, [1., 2.])\n assert_array_equal(getattr(w, key), [1., 2.])\n with pytest.raises(ValueError):\n setattr(w, key, [\"foo\", 
\"bar\"])\n with pytest.raises(ValueError):\n setattr(w, key, \"foo\")\n"}}},{"rowIdx":1390,"cells":{"hash":{"kind":"string","value":"89ed2a3746da4c2c99da0eb15c23e3fd48e87c93ea33d4ab590457a99c6d1a3c"},"content":{"kind":"string","value":"import numbers\nfrom collections import defaultdict\n\nimport numpy as np\n\nfrom astropy.utils import isiterable\nfrom astropy.utils.decorators import lazyproperty\n\nfrom ..low_level_api import BaseLowLevelWCS\nfrom .base import BaseWCSWrapper\n\n__all__ = ['sanitize_slices', 'SlicedLowLevelWCS']\n\n\ndef sanitize_slices(slices, ndim):\n \"\"\"\n Given a slice as input sanitise it to an easier to parse format.format\n\n This function returns a list ``ndim`` long containing slice objects (or ints).\n \"\"\"\n\n if not isinstance(slices, (tuple, list)): # We just have a single int\n slices = (slices,)\n\n if len(slices) > ndim:\n raise ValueError(\n f\"The dimensionality of the specified slice {slices} can not be greater \"\n f\"than the dimensionality ({ndim}) of the wcs.\")\n\n if any(isiterable(s) for s in slices):\n raise IndexError(\"This slice is invalid, only integer or range slices are supported.\")\n\n slices = list(slices)\n\n if Ellipsis in slices:\n if slices.count(Ellipsis) > 1:\n raise IndexError(\"an index can only have a single ellipsis ('...')\")\n\n # Replace the Ellipsis with the correct number of slice(None)s\n e_ind = slices.index(Ellipsis)\n slices.remove(Ellipsis)\n n_e = ndim - len(slices)\n for i in range(n_e):\n ind = e_ind + i\n slices.insert(ind, slice(None))\n\n for i in range(ndim):\n if i < len(slices):\n slc = slices[i]\n if isinstance(slc, slice):\n if slc.step and slc.step != 1:\n raise IndexError(\"Slicing WCS with a step is not supported.\")\n elif not isinstance(slc, numbers.Integral):\n raise IndexError(\"Only integer or range slices are accepted.\")\n else:\n slices.append(slice(None))\n\n return slices\n\n\ndef combine_slices(slice1, slice2):\n \"\"\"\n Given two slices that can be applied to 
a 1-d array, find the resulting\n slice that corresponds to the combination of both slices. We assume that\n slice2 can be an integer, but slice1 cannot.\n \"\"\"\n\n if isinstance(slice1, slice) and slice1.step is not None:\n raise ValueError('Only slices with steps of 1 are supported')\n\n if isinstance(slice2, slice) and slice2.step is not None:\n raise ValueError('Only slices with steps of 1 are supported')\n\n if isinstance(slice2, numbers.Integral):\n if slice1.start is None:\n return slice2\n else:\n return slice2 + slice1.start\n\n if slice1.start is None:\n if slice1.stop is None:\n return slice2\n else:\n if slice2.stop is None:\n return slice(slice2.start, slice1.stop)\n else:\n return slice(slice2.start, min(slice1.stop, slice2.stop))\n else:\n if slice2.start is None:\n start = slice1.start\n else:\n start = slice1.start + slice2.start\n if slice2.stop is None:\n stop = slice1.stop\n else:\n if slice1.start is None:\n stop = slice2.stop\n else:\n stop = slice2.stop + slice1.start\n if slice1.stop is not None:\n stop = min(slice1.stop, stop)\n return slice(start, stop)\n\n\nclass SlicedLowLevelWCS(BaseWCSWrapper):\n \"\"\"\n A Low Level WCS wrapper which applies an array slice to a WCS.\n\n This class does not modify the underlying WCS object and can therefore drop\n coupled dimensions as it stores which pixel and world dimensions have been\n sliced out (or modified) in the underlying WCS and returns the modified\n results on all the Low Level WCS methods.\n\n Parameters\n ----------\n wcs : `~astropy.wcs.wcsapi.BaseLowLevelWCS`\n The WCS to slice.\n slices : `slice` or `tuple` or `int`\n A valid array slice to apply to the WCS.\n\n \"\"\"\n def __init__(self, wcs, slices):\n\n slices = sanitize_slices(slices, wcs.pixel_n_dim)\n\n if isinstance(wcs, SlicedLowLevelWCS):\n # Here we combine the current slices with the previous slices\n # to avoid ending up with many nested WCSes\n self._wcs = wcs._wcs\n slices_original = wcs._slices_array.copy()\n for 
ipixel in range(wcs.pixel_n_dim):\n ipixel_orig = wcs._wcs.pixel_n_dim - 1 - wcs._pixel_keep[ipixel]\n ipixel_new = wcs.pixel_n_dim - 1 - ipixel\n slices_original[ipixel_orig] = combine_slices(slices_original[ipixel_orig],\n slices[ipixel_new])\n self._slices_array = slices_original\n else:\n self._wcs = wcs\n self._slices_array = slices\n\n self._slices_pixel = self._slices_array[::-1]\n\n # figure out which pixel dimensions have been kept, then use axis correlation\n # matrix to figure out which world dims are kept\n self._pixel_keep = np.nonzero([not isinstance(self._slices_pixel[ip], numbers.Integral)\n for ip in range(self._wcs.pixel_n_dim)])[0]\n\n # axis_correlation_matrix[world, pixel]\n self._world_keep = np.nonzero(\n self._wcs.axis_correlation_matrix[:, self._pixel_keep].any(axis=1))[0]\n\n if len(self._pixel_keep) == 0 or len(self._world_keep) == 0:\n raise ValueError(\"Cannot slice WCS: the resulting WCS should have \"\n \"at least one pixel and one world dimension.\")\n\n @lazyproperty\n def dropped_world_dimensions(self):\n \"\"\"\n Information describing the dropped world dimensions.\n \"\"\"\n world_coords = self._pixel_to_world_values_all(*[0]*len(self._pixel_keep))\n dropped_info = defaultdict(list)\n\n for i in range(self._wcs.world_n_dim):\n\n if i in self._world_keep:\n continue\n\n if \"world_axis_object_classes\" not in dropped_info:\n dropped_info[\"world_axis_object_classes\"] = dict()\n\n wao_classes = self._wcs.world_axis_object_classes\n wao_components = self._wcs.world_axis_object_components\n\n dropped_info[\"value\"].append(world_coords[i])\n dropped_info[\"world_axis_names\"].append(self._wcs.world_axis_names[i])\n dropped_info[\"world_axis_physical_types\"].append(self._wcs.world_axis_physical_types[i])\n dropped_info[\"world_axis_units\"].append(self._wcs.world_axis_units[i])\n dropped_info[\"world_axis_object_components\"].append(wao_components[i])\n dropped_info[\"world_axis_object_classes\"].update(dict(\n filter(\n lambda x: 
x[0] == wao_components[i][0], wao_classes.items()\n )\n ))\n dropped_info[\"serialized_classes\"] = self.serialized_classes\n return dict(dropped_info)\n\n @property\n def pixel_n_dim(self):\n return len(self._pixel_keep)\n\n @property\n def world_n_dim(self):\n return len(self._world_keep)\n\n @property\n def world_axis_physical_types(self):\n return [self._wcs.world_axis_physical_types[i] for i in self._world_keep]\n\n @property\n def world_axis_units(self):\n return [self._wcs.world_axis_units[i] for i in self._world_keep]\n\n @property\n def pixel_axis_names(self):\n return [self._wcs.pixel_axis_names[i] for i in self._pixel_keep]\n\n @property\n def world_axis_names(self):\n return [self._wcs.world_axis_names[i] for i in self._world_keep]\n\n def _pixel_to_world_values_all(self, *pixel_arrays):\n pixel_arrays = tuple(map(np.asanyarray, pixel_arrays))\n pixel_arrays_new = []\n ipix_curr = -1\n for ipix in range(self._wcs.pixel_n_dim):\n if isinstance(self._slices_pixel[ipix], numbers.Integral):\n pixel_arrays_new.append(self._slices_pixel[ipix])\n else:\n ipix_curr += 1\n if self._slices_pixel[ipix].start is not None:\n pixel_arrays_new.append(pixel_arrays[ipix_curr] + self._slices_pixel[ipix].start)\n else:\n pixel_arrays_new.append(pixel_arrays[ipix_curr])\n\n pixel_arrays_new = np.broadcast_arrays(*pixel_arrays_new)\n return self._wcs.pixel_to_world_values(*pixel_arrays_new)\n\n def pixel_to_world_values(self, *pixel_arrays):\n world_arrays = self._pixel_to_world_values_all(*pixel_arrays)\n\n # Detect the case of a length 0 array\n if isinstance(world_arrays, np.ndarray) and not world_arrays.shape:\n return world_arrays\n\n if self._wcs.world_n_dim > 1:\n # Select the dimensions of the original WCS we are keeping.\n world_arrays = [world_arrays[iw] for iw in self._world_keep]\n # If there is only one world dimension (after slicing) we shouldn't return a tuple.\n if self.world_n_dim == 1:\n world_arrays = world_arrays[0]\n\n return world_arrays\n\n def 
world_to_pixel_values(self, *world_arrays):\n world_arrays = tuple(map(np.asanyarray, world_arrays))\n world_arrays_new = []\n iworld_curr = -1\n for iworld in range(self._wcs.world_n_dim):\n if iworld in self._world_keep:\n iworld_curr += 1\n world_arrays_new.append(world_arrays[iworld_curr])\n else:\n world_arrays_new.append(1.)\n\n world_arrays_new = np.broadcast_arrays(*world_arrays_new)\n pixel_arrays = list(self._wcs.world_to_pixel_values(*world_arrays_new))\n\n for ipixel in range(self._wcs.pixel_n_dim):\n if isinstance(self._slices_pixel[ipixel], slice) and self._slices_pixel[ipixel].start is not None:\n pixel_arrays[ipixel] -= self._slices_pixel[ipixel].start\n\n # Detect the case of a length 0 array\n if isinstance(pixel_arrays, np.ndarray) and not pixel_arrays.shape:\n return pixel_arrays\n pixel = tuple(pixel_arrays[ip] for ip in self._pixel_keep)\n if self.pixel_n_dim == 1 and self._wcs.pixel_n_dim > 1:\n pixel = pixel[0]\n return pixel\n\n @property\n def world_axis_object_components(self):\n return [self._wcs.world_axis_object_components[idx] for idx in self._world_keep]\n\n @property\n def world_axis_object_classes(self):\n keys_keep = [item[0] for item in self.world_axis_object_components]\n return dict([item for item in self._wcs.world_axis_object_classes.items() if item[0] in keys_keep])\n\n @property\n def array_shape(self):\n if self._wcs.array_shape:\n return np.broadcast_to(0, self._wcs.array_shape)[tuple(self._slices_array)].shape\n\n @property\n def pixel_shape(self):\n if self.array_shape:\n return tuple(self.array_shape[::-1])\n\n @property\n def pixel_bounds(self):\n if self._wcs.pixel_bounds is None:\n return\n\n bounds = []\n for idx in self._pixel_keep:\n if self._slices_pixel[idx].start is None:\n bounds.append(self._wcs.pixel_bounds[idx])\n else:\n imin, imax = self._wcs.pixel_bounds[idx]\n start = self._slices_pixel[idx].start\n bounds.append((imin - start, imax - start))\n\n return tuple(bounds)\n\n @property\n def 
axis_correlation_matrix(self):\n return self._wcs.axis_correlation_matrix[self._world_keep][:, self._pixel_keep]\n"}}},{"rowIdx":1391,"cells":{"hash":{"kind":"string","value":"436046348e98f3fe038e301b923f09a05c74c7671efe8a75ffa0b74a8ca208dc"},"content":{"kind":"string","value":"# Note that we test the main astropy.wcs.WCS class directly rather than testing\n# the mix-in class on its own (since it's not functional without being used as\n# a mix-in)\n\nimport warnings\n\nfrom packaging.version import Version\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_equal, assert_allclose\nfrom itertools import product\n\nfrom astropy import units as u\nfrom astropy.time import Time\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy.units import Quantity\nfrom astropy.coordinates import ICRS, FK5, Galactic, SkyCoord, SpectralCoord, ITRS, EarthLocation\nfrom astropy.io.fits import Header\nfrom astropy.io.fits.verify import VerifyWarning\nfrom astropy.units.core import UnitsWarning\nfrom astropy.utils.data import get_pkg_data_filename\nfrom astropy.wcs.wcs import WCS, FITSFixedWarning, Sip, NoConvergence\nfrom astropy.wcs.wcsapi.fitswcs import custom_ctype_to_ucd_mapping, VELOCITY_FRAMES\nfrom astropy.wcs._wcs import __version__ as wcsver\nfrom astropy.utils import iers\nfrom astropy.utils.exceptions import AstropyUserWarning\n\n###############################################################################\n# The following example is the simplest WCS with default values\n###############################################################################\n\n\nWCS_EMPTY = WCS(naxis=1)\nWCS_EMPTY.wcs.crpix = [1]\n\n\ndef test_empty():\n\n wcs = WCS_EMPTY\n\n # Low-level API\n\n assert wcs.pixel_n_dim == 1\n assert wcs.world_n_dim == 1\n assert wcs.array_shape is None\n assert wcs.pixel_shape is None\n assert wcs.world_axis_physical_types == [None]\n assert wcs.world_axis_units == ['']\n assert wcs.pixel_axis_names == ['']\n assert 
wcs.world_axis_names == ['']\n\n assert_equal(wcs.axis_correlation_matrix, True)\n\n assert wcs.world_axis_object_components == [('world', 0, 'value')]\n\n assert wcs.world_axis_object_classes['world'][0] is Quantity\n assert wcs.world_axis_object_classes['world'][1] == ()\n assert wcs.world_axis_object_classes['world'][2]['unit'] is u.one\n\n assert_allclose(wcs.pixel_to_world_values(29), 29)\n assert_allclose(wcs.array_index_to_world_values(29), 29)\n\n assert np.ndim(wcs.pixel_to_world_values(29)) == 0\n assert np.ndim(wcs.array_index_to_world_values(29)) == 0\n\n assert_allclose(wcs.world_to_pixel_values(29), 29)\n assert_equal(wcs.world_to_array_index_values(29), (29,))\n\n assert np.ndim(wcs.world_to_pixel_values(29)) == 0\n assert np.ndim(wcs.world_to_array_index_values(29)) == 0\n\n # High-level API\n\n coord = wcs.pixel_to_world(29)\n assert_quantity_allclose(coord, 29 * u.one)\n assert np.ndim(coord) == 0\n\n coord = wcs.array_index_to_world(29)\n assert_quantity_allclose(coord, 29 * u.one)\n assert np.ndim(coord) == 0\n\n coord = 15 * u.one\n\n x = wcs.world_to_pixel(coord)\n assert_allclose(x, 15.)\n assert np.ndim(x) == 0\n\n i = wcs.world_to_array_index(coord)\n assert_equal(i, 15)\n assert np.ndim(i) == 0\n\n\n###############################################################################\n# The following example is a simple 2D image with celestial coordinates\n###############################################################################\n\nHEADER_SIMPLE_CELESTIAL = \"\"\"\nWCSAXES = 2\nCTYPE1 = RA---TAN\nCTYPE2 = DEC--TAN\nCRVAL1 = 10\nCRVAL2 = 20\nCRPIX1 = 30\nCRPIX2 = 40\nCDELT1 = -0.1\nCDELT2 = 0.1\nCROTA2 = 0.\nCUNIT1 = deg\nCUNIT2 = deg\n\"\"\"\n\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore', VerifyWarning)\n WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring(\n HEADER_SIMPLE_CELESTIAL, sep='\\n'))\n\n\ndef test_simple_celestial():\n\n wcs = WCS_SIMPLE_CELESTIAL\n\n # Low-level API\n\n assert wcs.pixel_n_dim == 2\n assert 
wcs.world_n_dim == 2\n assert wcs.array_shape is None\n assert wcs.pixel_shape is None\n assert wcs.world_axis_physical_types == ['pos.eq.ra', 'pos.eq.dec']\n assert wcs.world_axis_units == ['deg', 'deg']\n assert wcs.pixel_axis_names == ['', '']\n assert wcs.world_axis_names == ['', '']\n\n assert_equal(wcs.axis_correlation_matrix, True)\n\n assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),\n ('celestial', 1, 'spherical.lat.degree')]\n\n assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord\n assert wcs.world_axis_object_classes['celestial'][1] == ()\n assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)\n assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg\n\n assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20))\n assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20))\n\n assert_allclose(wcs.world_to_pixel_values(10, 20), (29., 39.))\n assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29))\n\n # High-level API\n\n coord = wcs.pixel_to_world(29, 39)\n assert isinstance(coord, SkyCoord)\n assert isinstance(coord.frame, ICRS)\n assert_allclose(coord.ra.deg, 10)\n assert_allclose(coord.dec.deg, 20)\n\n coord = wcs.array_index_to_world(39, 29)\n assert isinstance(coord, SkyCoord)\n assert isinstance(coord.frame, ICRS)\n assert_allclose(coord.ra.deg, 10)\n assert_allclose(coord.dec.deg, 20)\n\n coord = SkyCoord(10, 20, unit='deg', frame='icrs')\n\n x, y = wcs.world_to_pixel(coord)\n assert_allclose(x, 29.)\n assert_allclose(y, 39.)\n\n i, j = wcs.world_to_array_index(coord)\n assert_equal(i, 39)\n assert_equal(j, 29)\n\n # Check that if the coordinates are passed in a different frame things still\n # work properly\n\n coord_galactic = coord.galactic\n\n x, y = wcs.world_to_pixel(coord_galactic)\n assert_allclose(x, 29.)\n assert_allclose(y, 39.)\n\n i, j = wcs.world_to_array_index(coord_galactic)\n assert_equal(i, 39)\n assert_equal(j, 29)\n\n # 
Check that we can actually index the array\n\n data = np.arange(3600).reshape((60, 60))\n\n coord = SkyCoord(10, 20, unit='deg', frame='icrs')\n index = wcs.world_to_array_index(coord)\n assert_equal(data[index], 2369)\n\n coord = SkyCoord([10, 12], [20, 22], unit='deg', frame='icrs')\n index = wcs.world_to_array_index(coord)\n assert_equal(data[index], [2369, 3550])\n\n\n###############################################################################\n# The following example is a spectral cube with axes in an unusual order\n###############################################################################\n\nHEADER_SPECTRAL_CUBE = \"\"\"\nWCSAXES = 3\nCTYPE1 = GLAT-CAR\nCTYPE2 = FREQ\nCTYPE3 = GLON-CAR\nCNAME1 = Latitude\nCNAME2 = Frequency\nCNAME3 = Longitude\nCRVAL1 = 10\nCRVAL2 = 20\nCRVAL3 = 25\nCRPIX1 = 30\nCRPIX2 = 40\nCRPIX3 = 45\nCDELT1 = -0.1\nCDELT2 = 0.5\nCDELT3 = 0.1\nCUNIT1 = deg\nCUNIT2 = Hz\nCUNIT3 = deg\n\"\"\"\n\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore', VerifyWarning)\n WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\\n'))\n\n\ndef test_spectral_cube():\n\n # Spectral cube with a weird axis ordering\n\n wcs = WCS_SPECTRAL_CUBE\n\n # Low-level API\n\n assert wcs.pixel_n_dim == 3\n assert wcs.world_n_dim == 3\n assert wcs.array_shape is None\n assert wcs.pixel_shape is None\n assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']\n assert wcs.world_axis_units == ['deg', 'Hz', 'deg']\n assert wcs.pixel_axis_names == ['', '', '']\n assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude']\n\n assert_equal(wcs.axis_correlation_matrix, [[True, False, True],\n [False, True, False],\n [True, False, True]])\n\n assert len(wcs.world_axis_object_components) == 3\n assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree')\n assert wcs.world_axis_object_components[1][:2] == ('spectral', 0)\n assert wcs.world_axis_object_components[2] == 
('celestial', 0, 'spherical.lon.degree')\n\n assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord\n assert wcs.world_axis_object_classes['celestial'][1] == ()\n assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)\n assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg\n\n assert wcs.world_axis_object_classes['spectral'][0] is Quantity\n assert wcs.world_axis_object_classes['spectral'][1] == ()\n assert wcs.world_axis_object_classes['spectral'][2] == {}\n\n assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))\n assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))\n\n assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.))\n assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))\n\n # High-level API\n\n coord, spec = wcs.pixel_to_world(29, 39, 44)\n assert isinstance(coord, SkyCoord)\n assert isinstance(coord.frame, Galactic)\n assert_allclose(coord.l.deg, 25)\n assert_allclose(coord.b.deg, 10)\n assert isinstance(spec, SpectralCoord)\n assert_allclose(spec.to_value(u.Hz), 20)\n\n coord, spec = wcs.array_index_to_world(44, 39, 29)\n assert isinstance(coord, SkyCoord)\n assert isinstance(coord.frame, Galactic)\n assert_allclose(coord.l.deg, 25)\n assert_allclose(coord.b.deg, 10)\n assert isinstance(spec, SpectralCoord)\n assert_allclose(spec.to_value(u.Hz), 20)\n\n coord = SkyCoord(25, 10, unit='deg', frame='galactic')\n spec = 20 * u.Hz\n\n with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):\n x, y, z = wcs.world_to_pixel(coord, spec)\n assert_allclose(x, 29.)\n assert_allclose(y, 39.)\n assert_allclose(z, 44.)\n\n # Order of world coordinates shouldn't matter\n with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):\n x, y, z = wcs.world_to_pixel(spec, coord)\n assert_allclose(x, 29.)\n assert_allclose(y, 39.)\n assert_allclose(z, 44.)\n\n with pytest.warns(AstropyUserWarning, match='No observer 
defined on WCS'):\n i, j, k = wcs.world_to_array_index(coord, spec)\n assert_equal(i, 44)\n assert_equal(j, 39)\n assert_equal(k, 29)\n\n # Order of world coordinates shouldn't matter\n with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):\n i, j, k = wcs.world_to_array_index(spec, coord)\n assert_equal(i, 44)\n assert_equal(j, 39)\n assert_equal(k, 29)\n\n\nHEADER_SPECTRAL_CUBE_NONALIGNED = HEADER_SPECTRAL_CUBE.strip() + '\\n' + \"\"\"\nPC2_3 = -0.5\nPC3_2 = +0.5\n\"\"\"\n\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore', VerifyWarning)\n WCS_SPECTRAL_CUBE_NONALIGNED = WCS(Header.fromstring(\n HEADER_SPECTRAL_CUBE_NONALIGNED, sep='\\n'))\n\n\ndef test_spectral_cube_nonaligned():\n\n # Make sure that correlation matrix gets adjusted if there are non-identity\n # CD matrix terms.\n\n wcs = WCS_SPECTRAL_CUBE_NONALIGNED\n\n assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']\n assert wcs.world_axis_units == ['deg', 'Hz', 'deg']\n assert wcs.pixel_axis_names == ['', '', '']\n assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude']\n\n assert_equal(wcs.axis_correlation_matrix, [[True, True, True],\n [False, True, True],\n [True, True, True]])\n\n # NOTE: we check world_axis_object_components and world_axis_object_classes\n # again here because in the past this failed when non-aligned axes were\n # present, so this serves as a regression test.\n\n assert len(wcs.world_axis_object_components) == 3\n assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree')\n assert wcs.world_axis_object_components[1][:2] == ('spectral', 0)\n assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree')\n\n assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord\n assert wcs.world_axis_object_classes['celestial'][1] == ()\n assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)\n assert 
wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg\n\n assert wcs.world_axis_object_classes['spectral'][0] is Quantity\n assert wcs.world_axis_object_classes['spectral'][1] == ()\n assert wcs.world_axis_object_classes['spectral'][2] == {}\n\n###############################################################################\n# The following example is from Rots et al (2015), Table 5. It represents a\n# cube with two spatial dimensions and one time dimension\n###############################################################################\n\n\nHEADER_TIME_CUBE = \"\"\"\nSIMPLE = T / Fits standard\nBITPIX = -32 / Bits per pixel\nNAXIS = 3 / Number of axes\nNAXIS1 = 2048 / Axis length\nNAXIS2 = 2048 / Axis length\nNAXIS3 = 11 / Axis length\nDATE = '2008-10-28T14:39:06' / Date FITS file was generated\nOBJECT = '2008 TC3' / Name of the object observed\nEXPTIME = 1.0011 / Integration time\nMJD-OBS = 54746.02749237 / Obs start\nDATE-OBS= '2008-10-07T00:39:35.3342' / Observing date\nTELESCOP= 'VISTA' / ESO Telescope Name\nINSTRUME= 'VIRCAM' / Instrument used.\nTIMESYS = 'UTC' / From Observatory Time System\nTREFPOS = 'TOPOCENT' / Topocentric\nMJDREF = 54746.0 / Time reference point in MJD\nRADESYS = 'ICRS' / Not equinoctal\nCTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection\nCRVAL2 = 2.01824372640628 / RA at ref pixel\nCUNIT2 = 'deg' / Angles are degrees always\nCRPIX2 = 2956.6 / Pixel coordinate at ref point\nCTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection\nCRVAL1 = 14.8289418840003 / Dec at ref pixel\nCUNIT1 = 'deg' / Angles are degrees always\nCRPIX1 = -448.2 / Pixel coordinate at ref point\nCTYPE3 = 'UTC' / linear time (UTC)\nCRVAL3 = 2375.341 / Relative time of first frame\nCUNIT3 = 's' / Time unit\nCRPIX3 = 1.0 / Pixel coordinate at ref point\nCTYPE3A = 'TT' / alternative linear time (TT)\nCRVAL3A = 2440.525 / Relative time of first frame\nCUNIT3A = 's' / Time unit\nCRPIX3A = 1.0 / Pixel coordinate at ref point\nOBSGEO-B= -24.6157 / [deg] Tel geodetic 
latitute (=North)+\nOBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+\nOBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid\nCRDER3 = 0.0819 / random error in timings from fit\nCSYER3 = 0.0100 / absolute time error\nPC1_1 = 0.999999971570892 / WCS transform matrix element\nPC1_2 = 0.000238449608932 / WCS transform matrix element\nPC2_1 = -0.000621542859395 / WCS transform matrix element\nPC2_2 = 0.999999806842218 / WCS transform matrix element\nCDELT1 = -9.48575432499806E-5 / Axis scale at reference point\nCDELT2 = 9.48683176211164E-5 / Axis scale at reference point\nCDELT3 = 13.3629 / Axis scale at reference point\nPV1_1 = 1. / ZPN linear term\nPV1_3 = 42. / ZPN cubic term\n\"\"\"\n\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning))\n WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep='\\n'))\n\n\ndef test_time_cube():\n\n # Spectral cube with a weird axis ordering\n\n wcs = WCS_TIME_CUBE\n\n assert wcs.pixel_n_dim == 3\n assert wcs.world_n_dim == 3\n assert wcs.array_shape == (11, 2048, 2048)\n assert wcs.pixel_shape == (2048, 2048, 11)\n assert wcs.world_axis_physical_types == ['pos.eq.dec', 'pos.eq.ra', 'time']\n assert wcs.world_axis_units == ['deg', 'deg', 's']\n assert wcs.pixel_axis_names == ['', '', '']\n assert wcs.world_axis_names == ['', '', '']\n\n assert_equal(wcs.axis_correlation_matrix, [[True, True, False],\n [True, True, False],\n [False, False, True]])\n\n components = wcs.world_axis_object_components\n assert components[0] == ('celestial', 1, 'spherical.lat.degree')\n assert components[1] == ('celestial', 0, 'spherical.lon.degree')\n assert components[2][:2] == ('time', 0)\n assert callable(components[2][2])\n\n assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord\n assert wcs.world_axis_object_classes['celestial'][1] == ()\n assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)\n assert 
wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg\n\n assert wcs.world_axis_object_classes['time'][0] is Time\n assert wcs.world_axis_object_classes['time'][1] == ()\n assert wcs.world_axis_object_classes['time'][2] == {}\n assert callable(wcs.world_axis_object_classes['time'][3])\n\n assert_allclose(wcs.pixel_to_world_values(-449.2, 2955.6, 0),\n (14.8289418840003, 2.01824372640628, 2375.341))\n\n assert_allclose(wcs.array_index_to_world_values(0, 2955.6, -449.2),\n (14.8289418840003, 2.01824372640628, 2375.341))\n\n assert_allclose(wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341),\n (-449.2, 2955.6, 0))\n assert_equal(wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341),\n (0, 2956, -449))\n\n # High-level API\n\n coord, time = wcs.pixel_to_world(29, 39, 44)\n assert isinstance(coord, SkyCoord)\n assert isinstance(coord.frame, ICRS)\n assert_allclose(coord.ra.deg, 1.7323356692202325)\n assert_allclose(coord.dec.deg, 14.783516054817797)\n assert isinstance(time, Time)\n assert_allclose(time.mjd, 54746.03429755324)\n\n coord, time = wcs.array_index_to_world(44, 39, 29)\n assert isinstance(coord, SkyCoord)\n assert isinstance(coord.frame, ICRS)\n assert_allclose(coord.ra.deg, 1.7323356692202325)\n assert_allclose(coord.dec.deg, 14.783516054817797)\n assert isinstance(time, Time)\n assert_allclose(time.mjd, 54746.03429755324)\n\n x, y, z = wcs.world_to_pixel(coord, time)\n assert_allclose(x, 29.)\n assert_allclose(y, 39.)\n assert_allclose(z, 44.)\n\n # Order of world coordinates shouldn't matter\n x, y, z = wcs.world_to_pixel(time, coord)\n assert_allclose(x, 29.)\n assert_allclose(y, 39.)\n assert_allclose(z, 44.)\n\n i, j, k = wcs.world_to_array_index(coord, time)\n assert_equal(i, 44)\n assert_equal(j, 39)\n assert_equal(k, 29)\n\n # Order of world coordinates shouldn't matter\n i, j, k = wcs.world_to_array_index(time, coord)\n assert_equal(i, 44)\n assert_equal(j, 39)\n assert_equal(k, 
29)\n\n###############################################################################\n# The following tests are to make sure that Time objects are constructed\n# correctly for a variety of combinations of WCS keywords\n###############################################################################\n\n\nHEADER_TIME_1D = \"\"\"\nSIMPLE = T\nBITPIX = -32\nNAXIS = 1\nNAXIS1 = 2048\nTIMESYS = 'UTC'\nTREFPOS = 'TOPOCENT'\nMJDREF = 50002.6\nCTYPE1 = 'UTC'\nCRVAL1 = 5\nCUNIT1 = 's'\nCRPIX1 = 1.0\nCDELT1 = 2\nOBSGEO-L= -20\nOBSGEO-B= -70\nOBSGEO-H= 2530\n\"\"\"\n\nif Version(wcsver) >= Version('7.1'):\n HEADER_TIME_1D += \"DATEREF = '1995-10-12T14:24:00'\\n\"\n\n\n@pytest.fixture\ndef header_time_1d():\n return Header.fromstring(HEADER_TIME_1D, sep='\\n')\n\n\ndef assert_time_at(header, position, jd1, jd2, scale, format):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', FITSFixedWarning)\n wcs = WCS(header)\n time = wcs.pixel_to_world(position)\n assert_allclose(time.jd1, jd1, rtol=1e-10)\n assert_allclose(time.jd2, jd2, rtol=1e-10)\n assert time.format == format\n assert time.scale == scale\n\n\n@pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc', 'local'))\ndef test_time_1d_values(header_time_1d, scale):\n\n # Check that Time objects are instantiated with the correct values,\n # scales, and formats.\n\n header_time_1d['CTYPE1'] = scale.upper()\n assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, scale, 'mjd')\n\n\ndef test_time_1d_values_gps(header_time_1d):\n # Special treatment for GPS scale\n header_time_1d['CTYPE1'] = 'GPS'\n assert_time_at(header_time_1d, 1, 2450003, 0.1 + (7 + 19) / 3600 / 24, 'tai', 'mjd')\n\n\ndef test_time_1d_values_deprecated(header_time_1d):\n # Deprecated (in FITS) scales\n header_time_1d['CTYPE1'] = 'TDT'\n assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd')\n header_time_1d['CTYPE1'] = 'IAT'\n assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 
24, 'tai', 'mjd')\n header_time_1d['CTYPE1'] = 'GMT'\n assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd')\n header_time_1d['CTYPE1'] = 'ET'\n assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd')\n\n\ndef test_time_1d_values_time(header_time_1d):\n header_time_1d['CTYPE1'] = 'TIME'\n assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd')\n header_time_1d['TIMESYS'] = 'TAI'\n assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd')\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc'))\ndef test_time_1d_roundtrip(header_time_1d, scale):\n\n # Check that coordinates round-trip\n\n pixel_in = np.arange(3, 10)\n\n header_time_1d['CTYPE1'] = scale.upper()\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', FITSFixedWarning)\n wcs = WCS(header_time_1d)\n\n # Simple test\n time = wcs.pixel_to_world(pixel_in)\n pixel_out = wcs.world_to_pixel(time)\n assert_allclose(pixel_in, pixel_out)\n\n # Test with an intermediate change to a different scale/format\n time = wcs.pixel_to_world(pixel_in).tdb\n time.format = 'isot'\n pixel_out = wcs.world_to_pixel(time)\n assert_allclose(pixel_in, pixel_out)\n\n\ndef test_time_1d_high_precision(header_time_1d):\n\n # Case where the MJDREF is split into two for high precision\n del header_time_1d['MJDREF']\n header_time_1d['MJDREFI'] = 52000.\n header_time_1d['MJDREFF'] = 1e-11\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', FITSFixedWarning)\n wcs = WCS(header_time_1d)\n\n time = wcs.pixel_to_world(10)\n\n # Here we have to use a very small rtol to really test that MJDREFF is\n # taken into account\n assert_allclose(time.jd1, 2452001.0, rtol=1e-12)\n assert_allclose(time.jd2, -0.5 + 25 / 3600 / 24 + 1e-11, rtol=1e-13)\n\n\ndef test_time_1d_location_geodetic(header_time_1d):\n\n # Make sure that the location is correctly returned (geodetic case)\n\n 
with warnings.catch_warnings():\n warnings.simplefilter('ignore', FITSFixedWarning)\n wcs = WCS(header_time_1d)\n\n time = wcs.pixel_to_world(10)\n\n lon, lat, alt = time.location.to_geodetic()\n\n # FIXME: alt won't work for now because ERFA doesn't implement the IAU 1976\n # ellipsoid (https://github.com/astropy/astropy/issues/9420)\n assert_allclose(lon.degree, -20)\n assert_allclose(lat.degree, -70)\n # assert_allclose(alt.to_value(u.m), 2530.)\n\n\n@pytest.fixture\ndef header_time_1d_no_obs():\n header = Header.fromstring(HEADER_TIME_1D, sep='\\n')\n del header['OBSGEO-L']\n del header['OBSGEO-B']\n del header['OBSGEO-H']\n return header\n\n\ndef test_time_1d_location_geocentric(header_time_1d_no_obs):\n\n # Make sure that the location is correctly returned (geocentric case)\n\n header = header_time_1d_no_obs\n\n header['OBSGEO-X'] = 10\n header['OBSGEO-Y'] = -20\n header['OBSGEO-Z'] = 30\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', FITSFixedWarning)\n wcs = WCS(header)\n\n time = wcs.pixel_to_world(10)\n\n x, y, z = time.location.to_geocentric()\n\n assert_allclose(x.to_value(u.m), 10)\n assert_allclose(y.to_value(u.m), -20)\n assert_allclose(z.to_value(u.m), 30)\n\n\ndef test_time_1d_location_geocenter(header_time_1d_no_obs):\n\n header_time_1d_no_obs['TREFPOS'] = 'GEOCENTER'\n\n wcs = WCS(header_time_1d_no_obs)\n time = wcs.pixel_to_world(10)\n\n x, y, z = time.location.to_geocentric()\n\n assert_allclose(x.to_value(u.m), 0)\n assert_allclose(y.to_value(u.m), 0)\n assert_allclose(z.to_value(u.m), 0)\n\n\ndef test_time_1d_location_missing(header_time_1d_no_obs):\n\n # Check what happens when no location is present\n\n wcs = WCS(header_time_1d_no_obs)\n with pytest.warns(UserWarning,\n match='Missing or incomplete observer location '\n 'information, setting location in Time to None'):\n time = wcs.pixel_to_world(10)\n\n assert time.location is None\n\n\ndef test_time_1d_location_incomplete(header_time_1d_no_obs):\n\n # Check what 
happens when location information is incomplete\n\n header_time_1d_no_obs['OBSGEO-L'] = 10.\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', FITSFixedWarning)\n wcs = WCS(header_time_1d_no_obs)\n\n with pytest.warns(UserWarning,\n match='Missing or incomplete observer location '\n 'information, setting location in Time to None'):\n time = wcs.pixel_to_world(10)\n\n assert time.location is None\n\n\ndef test_time_1d_location_unsupported(header_time_1d_no_obs):\n\n # Check what happens when TREFPOS is unsupported\n\n header_time_1d_no_obs['TREFPOS'] = 'BARYCENTER'\n\n wcs = WCS(header_time_1d_no_obs)\n with pytest.warns(UserWarning,\n match=\"Observation location 'barycenter' is not \"\n \"supported, setting location in Time to None\"):\n time = wcs.pixel_to_world(10)\n\n assert time.location is None\n\n\ndef test_time_1d_unsupported_ctype(header_time_1d_no_obs):\n\n # For cases that we don't support yet, e.g. UT(...), use Time and drop sub-scale\n\n # Case where the MJDREF is split into two for high precision\n header_time_1d_no_obs['CTYPE1'] = 'UT(WWV)'\n\n wcs = WCS(header_time_1d_no_obs)\n with pytest.warns(UserWarning,\n match=\"Dropping unsupported sub-scale WWV from scale UT\"):\n time = wcs.pixel_to_world(10)\n\n assert isinstance(time, Time)\n\n\n###############################################################################\n# Extra corner cases\n###############################################################################\n\n\ndef test_unrecognized_unit():\n # TODO: Determine whether the following behavior is desirable\n wcs = WCS(naxis=1)\n with pytest.warns(UnitsWarning):\n wcs.wcs.cunit = ['bananas // sekonds']\n assert wcs.world_axis_units == ['bananas // sekonds']\n\n\ndef test_distortion_correlations():\n\n filename = get_pkg_data_filename('../../tests/data/sip.fits')\n with pytest.warns(FITSFixedWarning):\n w = WCS(filename)\n assert_equal(w.axis_correlation_matrix, True)\n\n # Changing PC to an identity matrix doesn't change 
anything since\n # distortions are still present.\n w.wcs.pc = [[1, 0], [0, 1]]\n assert_equal(w.axis_correlation_matrix, True)\n\n # Nor does changing the name of the axes to make them non-celestial\n w.wcs.ctype = ['X', 'Y']\n assert_equal(w.axis_correlation_matrix, True)\n\n # However once we turn off the distortions the matrix changes\n w.sip = None\n assert_equal(w.axis_correlation_matrix, [[True, False], [False, True]])\n\n # If we go back to celestial coordinates then the matrix is all True again\n w.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n assert_equal(w.axis_correlation_matrix, True)\n\n # Or if we change to X/Y but have a non-identity PC\n w.wcs.pc = [[0.9, -0.1], [0.1, 0.9]]\n w.wcs.ctype = ['X', 'Y']\n assert_equal(w.axis_correlation_matrix, True)\n\n\ndef test_custom_ctype_to_ucd_mappings():\n\n wcs = WCS(naxis=1)\n wcs.wcs.ctype = ['SPAM']\n\n assert wcs.world_axis_physical_types == [None]\n\n # Check simple behavior\n\n with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):\n assert wcs.world_axis_physical_types == [None]\n\n with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit', 'SPAM': 'food.spam'}):\n assert wcs.world_axis_physical_types == ['food.spam']\n\n # Check nesting\n\n with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):\n with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):\n assert wcs.world_axis_physical_types == ['food.spam']\n\n with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):\n with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):\n assert wcs.world_axis_physical_types == ['food.spam']\n\n # Check priority in nesting\n\n with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):\n with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):\n assert wcs.world_axis_physical_types == ['food.spam']\n\n with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):\n with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):\n assert wcs.world_axis_physical_types == ['notfood']\n\n\ndef 
test_caching_components_and_classes():\n\n # Make sure that when we change the WCS object, the classes and components\n # are updated (we use a cache internally, so we need to make sure the cache\n # is invalidated if needed)\n\n wcs = WCS_SIMPLE_CELESTIAL.deepcopy()\n\n assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),\n ('celestial', 1, 'spherical.lat.degree')]\n\n assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord\n assert wcs.world_axis_object_classes['celestial'][1] == ()\n assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)\n assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg\n\n wcs.wcs.radesys = 'FK5'\n\n frame = wcs.world_axis_object_classes['celestial'][2]['frame']\n assert isinstance(frame, FK5)\n assert frame.equinox.jyear == 2000.\n\n wcs.wcs.equinox = 2010\n\n frame = wcs.world_axis_object_classes['celestial'][2]['frame']\n assert isinstance(frame, FK5)\n assert frame.equinox.jyear == 2010.\n\n\ndef test_sub_wcsapi_attributes():\n\n # Regression test for a bug that caused some of the WCS attributes to be\n # incorrect when using WCS.sub or WCS.celestial (which is an alias for sub\n # with lon/lat types).\n\n wcs = WCS_SPECTRAL_CUBE.deepcopy()\n wcs.pixel_shape = (30, 40, 50)\n wcs.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]\n\n # Use celestial shortcut\n\n wcs_sub1 = wcs.celestial\n\n assert wcs_sub1.pixel_n_dim == 2\n assert wcs_sub1.world_n_dim == 2\n assert wcs_sub1.array_shape == (50, 30)\n assert wcs_sub1.pixel_shape == (30, 50)\n assert wcs_sub1.pixel_bounds == [(-1, 11), (5, 15)]\n assert wcs_sub1.world_axis_physical_types == ['pos.galactic.lat', 'pos.galactic.lon']\n assert wcs_sub1.world_axis_units == ['deg', 'deg']\n assert wcs_sub1.world_axis_names == ['Latitude', 'Longitude']\n\n # Try adding axes\n\n wcs_sub2 = wcs.sub([0, 2, 0])\n\n assert wcs_sub2.pixel_n_dim == 3\n assert wcs_sub2.world_n_dim == 3\n assert wcs_sub2.array_shape == (None, 
40, None)\n assert wcs_sub2.pixel_shape == (None, 40, None)\n assert wcs_sub2.pixel_bounds == [None, (-2, 18), None]\n assert wcs_sub2.world_axis_physical_types == [None, 'em.freq', None]\n assert wcs_sub2.world_axis_units == ['', 'Hz', '']\n assert wcs_sub2.world_axis_names == ['', 'Frequency', '']\n\n # Use strings\n\n wcs_sub3 = wcs.sub(['longitude', 'latitude'])\n\n assert wcs_sub3.pixel_n_dim == 2\n assert wcs_sub3.world_n_dim == 2\n assert wcs_sub3.array_shape == (30, 50)\n assert wcs_sub3.pixel_shape == (50, 30)\n assert wcs_sub3.pixel_bounds == [(5, 15), (-1, 11)]\n assert wcs_sub3.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat']\n assert wcs_sub3.world_axis_units == ['deg', 'deg']\n assert wcs_sub3.world_axis_names == ['Longitude', 'Latitude']\n\n # Now try without CNAME set\n\n wcs.wcs.cname = [''] * wcs.wcs.naxis\n wcs_sub4 = wcs.sub(['longitude', 'latitude'])\n\n assert wcs_sub4.pixel_n_dim == 2\n assert wcs_sub4.world_n_dim == 2\n assert wcs_sub4.array_shape == (30, 50)\n assert wcs_sub4.pixel_shape == (50, 30)\n assert wcs_sub4.pixel_bounds == [(5, 15), (-1, 11)]\n assert wcs_sub4.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat']\n assert wcs_sub4.world_axis_units == ['deg', 'deg']\n assert wcs_sub4.world_axis_names == ['', '']\n\n\nHEADER_POLARIZED = \"\"\"\nCTYPE1 = 'HPLT-TAN'\nCTYPE2 = 'HPLN-TAN'\nCTYPE3 = 'STOKES'\n\"\"\"\n\n\n@pytest.fixture\ndef header_polarized():\n return Header.fromstring(HEADER_POLARIZED, sep='\\n')\n\n\ndef test_phys_type_polarization(header_polarized):\n w = WCS(header_polarized)\n assert w.world_axis_physical_types[2] == 'phys.polarization.stokes'\n\n\n###############################################################################\n# Spectral transformations\n###############################################################################\n\nHEADER_SPECTRAL_FRAMES = \"\"\"\nBUNIT = 'Jy/beam'\nEQUINOX = 2.000000000E+03\nCTYPE1 = 'RA---SIN'\nCRVAL1 = 2.60108333333E+02\nCDELT1 = 
-2.777777845E-04\nCRPIX1 = 1.0\nCUNIT1 = 'deg'\nCTYPE2 = 'DEC--SIN'\nCRVAL2 = -9.75000000000E-01\nCDELT2 = 2.777777845E-04\nCRPIX2 = 1.0\nCUNIT2 = 'deg'\nCTYPE3 = 'FREQ'\nCRVAL3 = 1.37835117405E+09\nCDELT3 = 9.765625000E+04\nCRPIX3 = 32.0\nCUNIT3 = 'Hz'\nSPECSYS = 'TOPOCENT'\nRESTFRQ = 1.420405752E+09 / [Hz]\nRADESYS = 'FK5'\n\"\"\"\n\n\n@pytest.fixture\ndef header_spectral_frames():\n return Header.fromstring(HEADER_SPECTRAL_FRAMES, sep='\\n')\n\n\ndef test_spectralcoord_frame(header_spectral_frames):\n\n # This is a test to check the numerical results of transformations between\n # different velocity frames. We simply make sure that the returned\n # SpectralCoords are in the right frame but don't check the transformations\n # since this is already done in test_spectralcoord_accuracy\n # in astropy.coordinates.\n\n with iers.conf.set_temp('auto_download', False):\n\n obstime = Time(\"2009-05-04T04:44:23\", scale='utc')\n\n header = header_spectral_frames.copy()\n header['MJD-OBS'] = obstime.mjd\n header['CRVAL1'] = 16.33211\n header['CRVAL2'] = -34.2221\n header['OBSGEO-L'] = 144.2\n header['OBSGEO-B'] = -20.2\n header['OBSGEO-H'] = 0.\n\n # We start off with a WCS defined in topocentric frequency\n with pytest.warns(FITSFixedWarning):\n wcs_topo = WCS(header)\n\n # We convert a single pixel coordinate to world coordinates and keep only\n # the second high level object - a SpectralCoord:\n sc_topo = wcs_topo.pixel_to_world(0, 0, 31)[1]\n\n # We check that this is in topocentric frame with zero velocities\n assert isinstance(sc_topo, SpectralCoord)\n assert isinstance(sc_topo.observer, ITRS)\n assert sc_topo.observer.obstime.isot == obstime.isot\n assert_equal(sc_topo.observer.data.differentials['s'].d_xyz.value, 0)\n\n observatory = EarthLocation.from_geodetic(144.2, -20.2).get_itrs(obstime=obstime).transform_to(ICRS())\n assert observatory.separation_3d(sc_topo.observer.transform_to(ICRS())) < 1 * u.km\n\n for specsys, expected_frame in 
VELOCITY_FRAMES.items():\n\n header['SPECSYS'] = specsys\n with pytest.warns(FITSFixedWarning):\n wcs = WCS(header)\n sc = wcs.pixel_to_world(0, 0, 31)[1]\n\n # Now transform to the expected velocity frame, which should leave\n # the spectral coordinate unchanged\n sc_check = sc.with_observer_stationary_relative_to(expected_frame)\n assert_quantity_allclose(sc.quantity, sc_check.quantity)\n\n\n@pytest.mark.parametrize(('ctype3', 'observer'), product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'], [False, True]))\ndef test_different_ctypes(header_spectral_frames, ctype3, observer):\n\n header = header_spectral_frames.copy()\n header['CTYPE3'] = ctype3\n header['CRVAL3'] = 0.1\n header['CDELT3'] = 0.001\n\n if ctype3[0] == 'V':\n header['CUNIT3'] = 'm s-1'\n else:\n header['CUNIT3'] = ''\n\n header['RESTWAV'] = 1.420405752E+09\n header['MJD-OBS'] = 55197\n\n if observer:\n header['OBSGEO-L'] = 144.2\n header['OBSGEO-B'] = -20.2\n header['OBSGEO-H'] = 0.\n header['SPECSYS'] = 'BARYCENT'\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', FITSFixedWarning)\n wcs = WCS(header)\n\n skycoord, spectralcoord = wcs.pixel_to_world(0, 0, 31)\n\n assert isinstance(spectralcoord, SpectralCoord)\n\n if observer:\n pix = wcs.world_to_pixel(skycoord, spectralcoord)\n else:\n with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):\n pix = wcs.world_to_pixel(skycoord, spectralcoord)\n\n assert_allclose(pix, [0, 0, 31], rtol=1e-6, atol=1e-9)\n\n\ndef test_non_convergence_warning():\n \"\"\"Test case for issue #11446\n Since we can't define a target accuracy when plotting a WCS `all_world2pix`\n should not error but only warn when the default accuracy can't be reached.\n \"\"\"\n # define a minimal WCS where convergence fails for certain image positions\n wcs = WCS(naxis=2)\n crpix = [0, 0]\n a = b = ap = bp = np.zeros((4, 4))\n a[3, 0] = -1.20116753e-07\n\n test_pos_x = [1000, 1]\n test_pos_y = [0, 2]\n\n wcs.sip = Sip(a, b, ap, bp, crpix)\n # first make 
sure the WCS works when using a low accuracy\n expected = wcs.all_world2pix(test_pos_x, test_pos_y, 0, tolerance=1e-3)\n\n # then check that it fails when using the default accuracy\n with pytest.raises(NoConvergence):\n wcs.all_world2pix(test_pos_x, test_pos_y, 0)\n\n # at last check that world_to_pixel_values raises a warning but returns\n # the same 'low accuray' result\n with pytest.warns(UserWarning):\n assert_allclose(wcs.world_to_pixel_values(test_pos_x, test_pos_y),\n expected)\n\n\nHEADER_SPECTRAL_1D = \"\"\"\nCTYPE1 = 'FREQ'\nCRVAL1 = 1.37835117405E+09\nCDELT1 = 9.765625000E+04\nCRPIX1 = 32.0\nCUNIT1 = 'Hz'\nSPECSYS = 'TOPOCENT'\nRESTFRQ = 1.420405752E+09 / [Hz]\nRADESYS = 'FK5'\n\"\"\"\n\n\n@pytest.fixture\ndef header_spectral_1d():\n return Header.fromstring(HEADER_SPECTRAL_1D, sep='\\n')\n\n\n@pytest.mark.parametrize(('ctype1', 'observer'), product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'], [False, True]))\ndef test_spectral_1d(header_spectral_1d, ctype1, observer):\n\n # This is a regression test for issues that happened with 1-d WCS\n # where the target is not defined but observer is.\n\n header = header_spectral_1d.copy()\n header['CTYPE1'] = ctype1\n header['CRVAL1'] = 0.1\n header['CDELT1'] = 0.001\n\n if ctype1[0] == 'V':\n header['CUNIT1'] = 'm s-1'\n else:\n header['CUNIT1'] = ''\n\n header['RESTWAV'] = 1.420405752E+09\n header['MJD-OBS'] = 55197\n\n if observer:\n header['OBSGEO-L'] = 144.2\n header['OBSGEO-B'] = -20.2\n header['OBSGEO-H'] = 0.\n header['SPECSYS'] = 'BARYCENT'\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', FITSFixedWarning)\n wcs = WCS(header)\n\n # First ensure that transformations round-trip\n\n spectralcoord = wcs.pixel_to_world(31)\n\n assert isinstance(spectralcoord, SpectralCoord)\n assert spectralcoord.target is None\n assert (spectralcoord.observer is not None) is observer\n\n if observer:\n expected_message = 'No target defined on SpectralCoord'\n else:\n expected_message = 'No observer defined 
on WCS'\n\n with pytest.warns(AstropyUserWarning, match=expected_message):\n pix = wcs.world_to_pixel(spectralcoord)\n\n assert_allclose(pix, [31], rtol=1e-6)\n\n # Also make sure that we can convert a SpectralCoord on which the observer\n # is not defined but the target is.\n\n with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):\n spectralcoord_no_obs = SpectralCoord(spectralcoord.quantity,\n doppler_rest=spectralcoord.doppler_rest,\n doppler_convention=spectralcoord.doppler_convention,\n target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc))\n\n if observer:\n expected_message = 'No observer defined on SpectralCoord'\n else:\n expected_message = 'No observer defined on WCS'\n\n with pytest.warns(AstropyUserWarning, match=expected_message):\n pix2 = wcs.world_to_pixel(spectralcoord_no_obs)\n assert_allclose(pix2, [31], rtol=1e-6)\n\n # And finally check case when both observer and target are defined on the\n # SpectralCoord\n\n with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):\n spectralcoord_no_obs = SpectralCoord(spectralcoord.quantity,\n doppler_rest=spectralcoord.doppler_rest,\n doppler_convention=spectralcoord.doppler_convention,\n observer=ICRS(10 * u.deg, 20 * u.deg, distance=0 * u.kpc),\n target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc))\n\n if observer:\n pix3 = wcs.world_to_pixel(spectralcoord_no_obs)\n else:\n with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):\n pix3 = wcs.world_to_pixel(spectralcoord_no_obs)\n\n assert_allclose(pix3, [31], rtol=1e-6)\n\n\nHEADER_SPECTRAL_WITH_TIME = \"\"\"\nWCSAXES = 3\nCTYPE1 = 'RA---TAN'\nCTYPE2 = 'DEC--TAN'\nCTYPE3 = 'WAVE'\nCRVAL1 = 98.83153\nCRVAL2 = -66.818\nCRVAL3 = 6.4205\nCRPIX1 = 21.\nCRPIX2 = 22.\nCRPIX3 = 1.\nCDELT1 = 3.6111E-05\nCDELT2 = 3.6111E-05\nCDELT3 = 0.001\nCUNIT1 = 'deg'\nCUNIT2 = 'deg'\nCUNIT3 = 'um'\nMJD-AVG = 59045.41466\nRADESYS = 'ICRS'\nSPECSYS = 'BARYCENT'\nTIMESYS = 'UTC'\n\"\"\"\n\n\n@pytest.fixture\ndef 
header_spectral_with_time():\n return Header.fromstring(HEADER_SPECTRAL_WITH_TIME, sep='\\n')\n\n\ndef test_spectral_with_time_kw(header_spectral_with_time):\n def check_wcs(header):\n assert_allclose(w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval)\n sky, spec = w.pixel_to_world(*w.wcs.crpix)\n assert_allclose((sky.spherical.lon.degree, sky.spherical.lat.degree, spec.value),\n w.wcs.crval, rtol=1e-3)\n\n # Chek with MJD-AVG and TIMESYS\n hdr = header_spectral_with_time.copy()\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning))\n w = WCS(hdr)\n # Make sure the correct keyword is used in a test\n assert ~np.isnan(w.wcs.mjdavg)\n assert np.isnan(w.wcs.mjdobs)\n\n check_wcs(w)\n\n # Check fall back to MJD-OBS\n hdr['MJD-OBS'] = hdr['MJD-AVG']\n del hdr['MJD-AVG']\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning))\n w = WCS(hdr)\n # Make sure the correct keyword is used in a test\n assert ~np.isnan(w.wcs.mjdobs)\n assert np.isnan(w.wcs.mjdavg)\n check_wcs(w)\n\n # Check fall back to DATE--OBS\n hdr['DATE-OBS'] = '2020-07-15'\n del hdr['MJD-OBS']\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning))\n w = WCS(hdr)\n w.wcs.mjdobs = np.nan\n # Make sure the correct keyword is used in a test\n assert np.isnan(w.wcs.mjdobs)\n assert np.isnan(w.wcs.mjdavg)\n assert w.wcs.dateobs != \"\"\n check_wcs(hdr)\n\n # Check fall back to scale='utc'\n del hdr['TIMESYS']\n check_wcs(hdr)\n"}}},{"rowIdx":1392,"cells":{"hash":{"kind":"string","value":"6c917b9ae11646e4554b59a97aea10c752fb96588d0c85c961c7f4aef33baae1"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport functools\nimport itertools\nimport operator\nfrom decimal import Decimal\nfrom datetime import timedelta\n\nimport pytest\nimport numpy as np\n\nfrom astropy.time import (\n Time, TimeDelta, OperandTypeError, 
ScaleValueError, TIME_SCALES,\n STANDARD_TIME_SCALES, TIME_DELTA_SCALES, TimeDeltaMissingUnitWarning,\n)\nfrom astropy.utils import iers\nfrom astropy import units as u\nfrom astropy.table import Table\n\nallclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)\nallclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,\n atol=2. ** -52) # 20 ps atol\nallclose_sec = functools.partial(np.allclose, rtol=2. ** -52,\n atol=2. ** -52 * 24 * 3600) # 20 ps atol\norig_auto_download = iers.conf.auto_download\n\n\ndef setup_module(module):\n \"\"\"Use offline IERS table only.\"\"\"\n iers.conf.auto_download = False\n\n\ndef teardown_module(module):\n \"\"\"Restore original setting.\"\"\"\n iers.conf.auto_download = orig_auto_download\n\n\nclass TestTimeDelta:\n \"\"\"Test TimeDelta class\"\"\"\n\n def setup(self):\n self.t = Time('2010-01-01', scale='utc')\n self.t2 = Time('2010-01-02 00:00:01', scale='utc')\n self.t3 = Time('2010-01-03 01:02:03', scale='utc', precision=9,\n in_subfmt='date_hms', out_subfmt='date_hm',\n location=(-75. * u.degree, 30. 
* u.degree, 500 * u.m))\n self.t4 = Time('2010-01-01', scale='local')\n self.dt = TimeDelta(100.0, format='sec')\n self.dt_array = TimeDelta(np.arange(100, 1000, 100), format='sec')\n\n def test_sub(self):\n # time - time\n dt = self.t2 - self.t\n assert (repr(dt).startswith(\"= 2\n assert np.all(comp == [False, True, True])\n\n with pytest.warns(TimeDeltaMissingUnitWarning):\n # 2 is also interpreted as days, not seconds\n assert (TimeDelta(5 * u.s) > 2) is False\n\n # with unit is ok\n assert TimeDelta(1 * u.s).to_value(u.s) == 1\n\n # with format is also ok\n assert TimeDelta(1, format=\"sec\").to_value(u.s) == 1\n assert TimeDelta(1, format=\"jd\").to_value(u.day) == 1\n\n # table column with units\n table = Table({\"t\": [1, 2, 3] * u.s})\n assert np.all(TimeDelta(table[\"t\"]).to_value(u.s) == [1, 2, 3])\n"}}},{"rowIdx":1393,"cells":{"hash":{"kind":"string","value":"4a508978a6adc72c65f73aaccdd78238b6b55a8a7af2db08805d8db6037af758"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport os\nimport copy\nimport functools\nimport datetime\nfrom copy import deepcopy\nfrom decimal import Decimal, localcontext\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose\nimport erfa\nfrom erfa import ErfaWarning\n\nfrom astropy.utils.exceptions import AstropyDeprecationWarning\nfrom astropy.utils import isiterable, iers\nfrom astropy.time import (Time, TimeDelta, ScaleValueError, STANDARD_TIME_SCALES,\n TimeString, TimezoneInfo, TIME_FORMATS)\nfrom astropy.coordinates import EarthLocation\nfrom astropy import units as u\nfrom astropy.table import Column, Table\nfrom astropy.utils.compat.optional_deps import HAS_PYTZ, HAS_H5PY # noqa\n\n\nallclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)\nallclose_jd2 = functools.partial(np.allclose, rtol=np.finfo(float).eps,\n atol=np.finfo(float).eps) # 20 ps atol\nallclose_sec = 
functools.partial(np.allclose, rtol=np.finfo(float).eps,\n atol=np.finfo(float).eps * 24 * 3600)\nallclose_year = functools.partial(np.allclose, rtol=np.finfo(float).eps,\n atol=0.) # 14 microsec at current epoch\n\n\ndef setup_function(func):\n func.FORMATS_ORIG = deepcopy(Time.FORMATS)\n\n\ndef teardown_function(func):\n Time.FORMATS.clear()\n Time.FORMATS.update(func.FORMATS_ORIG)\n\n\nclass TestBasic:\n \"\"\"Basic tests stemming from initial example and API reference\"\"\"\n\n def test_simple(self):\n times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']\n t = Time(times, format='iso', scale='utc')\n assert (repr(t) == \"