Linear Next Benchmark

Linear Next is a comprehensive benchmark designed to fairly compare various efficient transformer architectures. The project evaluates different approaches, including linear attention, sparse attention, and other efficient model structures, under identical training conditions and on identical datasets.
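
To make the comparison concrete, the sketch below contrasts standard softmax attention, whose cost grows quadratically with sequence length, with a kernelized linear attention variant that reorders the same computation to run in linear time. This is an illustrative minimal example rather than code from the benchmark; the ELU-based feature map, single-head shapes, and non-causal setting are assumptions made for clarity.

```python
# Minimal sketch: softmax attention vs. kernelized linear attention.
# Illustrative only -- not the benchmark's implementation.
import torch
import torch.nn.functional as F

def softmax_attention(q, k, v):
    # q, k, v: (n, d). Materializes an n x n score matrix: O(n^2 * d).
    scores = q @ k.T / q.shape[-1] ** 0.5
    return F.softmax(scores, dim=-1) @ v

def linear_attention(q, k, v, eps=1e-6):
    # Reassociate phi(q) @ (phi(k).T @ v) so the n x n matrix is never
    # formed; cost drops to O(n * d^2), linear in sequence length n.
    phi = lambda x: F.elu(x) + 1.0           # positive feature map (assumed)
    q, k = phi(q), phi(k)
    kv = k.T @ v                             # (d, d) key-value summary
    z = q @ k.sum(dim=0, keepdim=True).T     # (n, 1) normalizer
    return (q @ kv) / (z + eps)

q, k, v = (torch.randn(128, 64) for _ in range(3))
print(softmax_attention(q, k, v).shape, linear_attention(q, k, v).shape)
```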

Overview

The benchmark aims to provide an unbiased comparison of efficient transformer variants by ensuring all models are trained on the same datasets with the same hyperparameters and judged by the same evaluation metrics. This allows for a clear understanding of the relative strengths and weaknesses of each approach.

Datasets

The benchmark utilizes a diverse collection of high-quality datasets:

General Text

  • DCLM-pro: A large-scale dataset containing diverse text from various domains, designed for general language modeling tasks.
  • Cosmopedia-v2: A curated corpus of high-quality web content covering a wide range of topics, with emphasis on educational and informative material.
  • Fineweb-edu: A filtered collection of educational web content, focusing on instructional and academic text from reliable sources.

Code

  • The Stack v2: A comprehensive collection of source code spanning multiple programming languages, designed to train models on code understanding and generation tasks.

Mathematics

  • Finemath: A specialized dataset containing mathematical content, including equations, proofs, and mathematical explanations across various difficulty levels.

Reasoning

  • Natural Reasoning: A dataset focused on logical reasoning, problem-solving, and inference tasks, designed to improve models' reasoning capabilities.

Methodology

All models in the Linear Next benchmark are trained and evaluated with identical:

  • Training datasets and data mixing ratios
  • Optimization parameters
  • Hardware configurations
  • Evaluation metrics

This controlled environment ensures that performance differences can be attributed to architectural differences rather than to training conditions.
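
As a hypothetical illustration of what these shared conditions can look like in practice, the sketch below pins every architecture variant to one frozen configuration covering data mixing ratios and optimizer settings. All names and numbers here are invented for illustration; the benchmark's actual ratios and hyperparameters are documented in the project repository.

```python
# Hypothetical shared configuration -- every value is illustrative,
# not the benchmark's actual settings.
from dataclasses import dataclass, field

@dataclass(frozen=True)
class SharedConfig:
    # Mixing ratios over the benchmark corpora (assumed; must sum to 1.0).
    data_mix: dict = field(default_factory=lambda: {
        "dclm-pro": 0.40, "cosmopedia-v2": 0.15, "fineweb-edu": 0.15,
        "the-stack-v2": 0.15, "finemath": 0.10, "natural-reasoning": 0.05,
    })
    seq_len: int = 2048            # identical optimization parameters
    global_batch_size: int = 1024
    lr: float = 3e-4
    warmup_steps: int = 2_000
    total_steps: int = 100_000

def train(architecture: str, cfg: SharedConfig):
    # Only `architecture` varies between runs; data and schedule are fixed,
    # so performance gaps are attributable to the architecture itself.
    assert abs(sum(cfg.data_mix.values()) - 1.0) < 1e-9
    print(f"training {architecture} with lr={cfg.lr}, mix={cfg.data_mix}")

cfg = SharedConfig()
for arch in ("softmax-baseline", "linear-attention", "sparse-attention"):
    train(arch, cfg)
```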

Results

Detailed benchmark results, including training curves, inference speed, memory usage, and performance metrics across different tasks, are available in the project repository.
