path (stringlengths 9-117) | type (stringclasses, 2 values) | project (stringclasses, 10 values) | commit_hash (stringlengths 40) | commit_message (stringlengths 1-137) | ground_truth (stringlengths 0-2.74k) | main_code (stringlengths 102-3.37k) | context (stringlengths 0-14.7k) |
---|---|---|---|---|---|---|---|
scrapling.engines.toolbelt.custom/Response.adaptor
|
Modified
|
D4Vinci~Scrapling
|
1ff1c93bcf2dcccb421de768cbb1fffd19acbbd9
|
Give response's body bytes priority over response's text
|
<0>:<add> if self.content:
<add> return Adaptor(body=self.content, url=self.url, encoding=self.encoding, **self.adaptor_arguments)
<add> elif self.text:
<del> if self.text:
<2>:<del> elif self.content:
<3>:<del> return Adaptor(body=self.content, url=self.url, encoding=self.encoding, **self.adaptor_arguments)
|
# module: scrapling.engines.toolbelt.custom
@dataclass(frozen=True)
class Response:
@property
def adaptor(self):
<0> if self.text:
<1> return Adaptor(text=self.text, url=self.url, encoding=self.encoding, **self.adaptor_arguments)
<2> elif self.content:
<3> return Adaptor(body=self.content, url=self.url, encoding=self.encoding, **self.adaptor_arguments)
<4> return None
<5>
|
===========unchanged ref 0===========
at: scrapling.engines.toolbelt.custom.Response
url: str
text: str
content: bytes
status: int
reason: str
encoding: str = 'utf-8' # default encoding
cookies: Dict = field(default_factory=dict)
headers: Dict = field(default_factory=dict)
request_headers: Dict = field(default_factory=dict)
adaptor_arguments: Dict = field(default_factory=dict)
at: scrapling.parser
Adaptor(text: Optional[str]=None, url: Optional[str]=None, body: bytes=b"", encoding: str="utf8", huge_tree: bool=True, root: Optional[html.HtmlElement]=None, keep_comments: Optional[bool]=False, auto_match: Optional[bool]=False, storage: Any=SQLiteStorageSystem, storage_args: Optional[Dict]=None, debug: Optional[bool]=True)
|
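
The diff above flips the branch order so raw body bytes take priority over the decoded text when building the parser. Below is a minimal, self-contained sketch of the resulting property logic; `SketchResponse` is a stand-in for the real `Response` dataclass and returns a string instead of an `Adaptor` so it runs on its own.

from dataclasses import dataclass, field
from typing import Dict, Optional


@dataclass(frozen=True)
class SketchResponse:
    # Only the fields the property needs from the real Response.
    url: str = ""
    text: str = ""
    content: bytes = b""
    encoding: str = "utf-8"
    adaptor_arguments: Dict = field(default_factory=dict)

    @property
    def adaptor(self) -> Optional[str]:
        # After the commit: body bytes win, decoded text is the fallback.
        if self.content:
            return f"Adaptor(body=..., encoding={self.encoding})"
        elif self.text:
            return f"Adaptor(text=..., encoding={self.encoding})"
        return None


print(SketchResponse(text="<p>hi</p>", content=b"<p>hi</p>").adaptor)  # body branch
print(SketchResponse(text="<p>hi</p>").adaptor)                        # text fallback
print(SketchResponse().adaptor)                                        # None
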
tests.test_parser_functions/TestParser.test_expected_errors
|
Modified
|
D4Vinci~Scrapling
|
55ab1d63b7c69da02fe555f1f616acb8e8b43e48
|
Turn auto_match off when not needed in testing
|
<2>:<add> _ = Adaptor(auto_match=False)
<del> _ = Adaptor()
<5>:<add> _ = Adaptor(root="ayo", auto_match=False)
<del> _ = Adaptor(root="ayo")
<8>:<add> _ = Adaptor(text=1, auto_match=False)
<del> _ = Adaptor(text=1)
<11>:<add> _ = Adaptor(body=1, auto_match=False)
<del> _ = Adaptor(body=1)
|
# module: tests.test_parser_functions
class TestParser(unittest.TestCase):
def test_expected_errors(self):
<0> """Test errors that should raised if it does"""
<1> with self.assertRaises(ValueError):
<2> _ = Adaptor()
<3>
<4> with self.assertRaises(TypeError):
<5> _ = Adaptor(root="ayo")
<6>
<7> with self.assertRaises(TypeError):
<8> _ = Adaptor(text=1)
<9>
<10> with self.assertRaises(TypeError):
<11> _ = Adaptor(body=1)
<12>
<13> with self.assertRaises(ValueError):
<14> _ = Adaptor(self.html, storage=object, auto_match=True)
<15>
| |
scrapling.engines.toolbelt.custom/BaseFetcher.__init__
|
Modified
|
D4Vinci~Scrapling
|
be350f21dd8e7b5f2b9fa68f04f323e79d862709
|
Adding documentation to the base fetcher...
|
<0>:<add> """Arguments below are the same from the Adaptor class so you can pass them directly, the rest of Adaptor's arguments
<add> are detected and passed automatically from the Fetcher based on the response for accessibility.
<add>
<add> :param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls
<add> libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion.
<add> :param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons
<add> :param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
<add> priority over all auto-match related arguments/functions in the class.
<add> :param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info.
<add> :param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class.
<add> If empty, default values will be used.
<add> :param debug: Enable debug mode
<add> """
<add> # Adaptor class
|
<s>] = False,
- auto_match: Optional[bool] = False,
- storage: Any = SQLiteStorageSystem,
- storage_args: Optional[Dict] = None,
- debug: Optional[bool] = True,
+ self, huge_tree: bool = True, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = True,
+ storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None, debug: Optional[bool] = True,
):
<0> # I won't validate Adaptor's class parameters here again, I will leave it to be validated later
<1> self.adaptor_arguments = dict(
<2> huge_tree=huge_tree,
<3> keep_comments=keep_comments,
<4> auto_match=auto_match,
<5> storage=storage,
<6> storage_args=storage_args,
<7> debug=debug,
<8> )
<9>
|
===========unchanged ref 0===========
at: scrapling.core.storage_adaptors
SQLiteStorageSystem(storage_file: str, url: Union[str, None]=None)
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
|
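
Per the docstring added above, `BaseFetcher.__init__` only bundles the Adaptor options into `self.adaptor_arguments` and leaves validation to the `Adaptor` class itself; the engines later merge in response-specific values. A rough sketch of that pattern (the class name here is illustrative, and `storage` really defaults to `SQLiteStorageSystem`):

from typing import Any, Dict, Optional


class SketchBaseFetcher:
    """Illustrative only: collect parser options up front, validate later."""

    def __init__(
        self,
        huge_tree: bool = True,
        keep_comments: Optional[bool] = False,
        auto_match: Optional[bool] = True,
        storage: Any = None,              # the real default is SQLiteStorageSystem
        storage_args: Optional[Dict] = None,
        debug: Optional[bool] = True,
    ):
        # No validation here; the Adaptor class validates when it is finally built.
        self.adaptor_arguments = dict(
            huge_tree=huge_tree,
            keep_comments=keep_comments,
            auto_match=auto_match,
            storage=storage,
            storage_args=storage_args,
            debug=debug,
        )


fetcher = SketchBaseFetcher(auto_match=False, debug=False)
print(fetcher.adaptor_arguments["auto_match"])  # False
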
scrapling.engines.camo/CamoufoxEngine.__init__
|
Modified
|
D4Vinci~Scrapling
|
e7300ecba0dce652a7754a7012f06cfc7cf68175
|
StealthFetcher - Add the option to allow webgl in the browser
|
<3>:<add> self.allow_webgl = bool(allow_webgl)
|
<s>bool, str] = True,
block_images: Optional[bool] = False,
block_webrtc: Optional[bool] = False,
+ allow_webgl: Optional[bool] = False,
network_idle: Optional[bool] = False,
timeout: Optional[float] = 30000,
page_action: Callable = do_nothing,
wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached',
adaptor_arguments: Dict = None
):
<0> self.headless = headless
<1> self.block_images = bool(block_images)
<2> self.block_webrtc = bool(block_webrtc)
<3> self.network_idle = bool(network_idle)
<4> self.timeout = check_type_validity(timeout, [int, float], 30000)
<5> if callable(page_action):
<6> self.page_action = page_action
<7> else:
<8> self.page_action = do_nothing
<9> logging.error('[Ignored] Argument "page_action" must be callable')
<10>
<11> self.wait_selector = wait_selector
<12> self.wait_selector_state = wait_selector_state
<13> self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
<14>
|
===========unchanged ref 0===========
at: logging
error(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: scrapling.engines.toolbelt.custom
check_type_validity(variable: Any, valid_types: Union[List[Type], None], default_value: Any=None, critical: bool=False, param_name: Optional[str]=None) -> Any
do_nothing(page)
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
Dict = _alias(dict, 2, inst=False, name='Dict')
|
scrapling.engines.camo/CamoufoxEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
e7300ecba0dce652a7754a7012f06cfc7cf68175
|
StealthFetcher - Add the option to allow webgl in the browser
|
<5>:<add> allow_webgl=self.allow_webgl,
|
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
<0> with Camoufox(
<1> headless=self.headless,
<2> block_images=self.block_images, # Careful! It makes some websites never finish loading at all, like StackOverflow, even in headful mode
<3> os=get_os_name(),
<4> block_webrtc=self.block_webrtc,
<5> ) as browser:
<6> page = browser.new_page()
<7> page.set_default_navigation_timeout(self.timeout)
<8> page.set_default_timeout(self.timeout)
<9> res = page.goto(url, referer=generate_convincing_referer(url))
<10> page.wait_for_load_state(state="load")
<11> page.wait_for_load_state(state="domcontentloaded")
<12> if self.network_idle:
<13> page.wait_for_load_state('networkidle')
<14>
<15> page = self.page_action(page)
<16>
<17> if self.wait_selector and type(self.wait_selector) is str:
<18> waiter = page.locator(self.wait_selector)
<19> waiter.wait_for(state=self.wait_selector_state)
<20>
<21> content_type = res.headers.get('content-type', '')
<22> # Parse charset from content-type
<23> encoding = 'utf-8' # default encoding
<24> if 'charset=' in content_type.lower():
<25> encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
<26>
<27> response = Response(
<28> url=res.url,
<29> text=res.text(),
<30> content=res.body(),
<31> status=res.status,
<32> reason=res.status_text,
<33> encoding=encoding,
<34> cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
<35> headers=res.all_headers(),
</s>
|
===========below chunk 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.camo.CamoufoxEngine.__init__
self.headless = headless
self.block_images = bool(block_images)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.timeout = check_type_validity(timeout, [int, float], 30000)
self.page_action = do_nothing
self.page_action = page_action
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
at: scrapling.engines.toolbelt.custom.Response
url: str
text: str
content: bytes
status: int
reason: str
encoding: str = 'utf-8' # default encoding
cookies: Dict = field(default_factory=dict)
headers: Dict = field(default_factory=dict)
request_headers: Dict = field(default_factory=dict)
adaptor_arguments: Dict = field(default_factory=dict)
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url)
get_os_name()
===========changed ref 0===========
<s>bool, str] = True,
block_images: Optional[bool] = False,
block_webrtc: Optional[bool] = False,
+ allow_webgl: Optional[bool] = False,
network_idle: Optional[bool] = False,
timeout: Optional[float] = 30000,
page_action: Callable = do_nothing,
wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached',
adaptor_arguments: Dict = None
):
self.headless = headless
self.block_images = bool(block_images)
self.block_webrtc = bool(block_webrtc)
+ self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.timeout = check_type_validity(timeout, [int, float], 30000)
if callable(page_action):
self.page_action = page_action
else:
self.page_action = do_nothing
logging.error('[Ignored] Argument "page_action" must be callable')
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
|
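
The same charset-sniffing block recurs in every fetch method shown in this dump, just before the `Response` is constructed. Pulled out on its own, the logic is simply this (the helper name is made up for illustration):

def sniff_encoding(content_type: str, default: str = "utf-8") -> str:
    """Pull the charset out of a Content-Type header value, else fall back."""
    content_type = (content_type or "").lower()
    if "charset=" in content_type:
        # "text/html; charset=ISO-8859-1" -> "iso-8859-1"
        return content_type.split("charset=")[-1].split(";")[0].strip()
    return default


assert sniff_encoding("text/html; charset=ISO-8859-1") == "iso-8859-1"
assert sniff_encoding("application/json") == "utf-8"
assert sniff_encoding("") == "utf-8"
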
scrapling.fetcher/StealthyFetcher.fetch
|
Modified
|
D4Vinci~Scrapling
|
e7e499d64b455112cc82f71ad837e8ce6dadefd5
|
Add documentation for StealthyFetcher class + add webgl option
|
<0>:<add> """
<add> Opens up a browser and do your request based on your chosen options below.
<add> :param url: Target url.
<add> :param headless: Run the browser in headless/hidden (default), virtual screen mode, or headful/visible mode.
<add> :param block_images: Prevent the loading of images through Firefox preferences.
<add> This can help save your proxy usage but careful with this option as it makes some websites never finish loading.
<add> :param block_webrtc: Blocks WebRTC entirely.
<add> :param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
<add> :param network_idle: Wait for the page to not do any requests.
<add> :param timeout: The timeout in milliseconds that's used in all operations and waits through the page. Default is 30000.
<add> :param page_action: Added for automation. A function that takes the `page` object, do the automation you need, then return `page` again.
<add> :param wait_selector: Wait for a specific css selector to be in a specific state
|
<s> def fetch(
self, url: str, headless: Union[bool, str] = True, block_images: Optional[bool] = False, block_webrtc: Optional[bool] = False,
+ allow_webgl: Optional[bool] = False,
network_idle: Optional[bool] = False, timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached',
) -> Response:
<0> engine = CamoufoxEngine(
<1> timeout=timeout,
<2> headless=headless,
<3> page_action=page_action,
<4> block_images=block_images,
<5> block_webrtc=block_webrtc,
<6> network_idle=network_idle,
<7> wait_selector=wait_selector,
<8> wait_selector_state=wait_selector_state,
<9> adaptor_arguments=self.adaptor_arguments,
<10> )
<11> return engine.fetch(url)
<12>
|
===========unchanged ref 0===========
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
do_nothing(page)
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
|
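
A hedged usage sketch of the method documented above. The module path `scrapling.fetcher` and the parameter names are taken from this row (a later row imports from `scrapling.fetchers`), so treat both the import and the defaults as version-dependent assumptions.

# Assumes scrapling is installed and exposes StealthyFetcher as shown in the diffs above.
from scrapling.fetcher import StealthyFetcher

fetcher = StealthyFetcher()              # Adaptor options keep their defaults
response = fetcher.fetch(
    "https://example.com",
    headless=True,        # hidden browser (the default)
    block_webrtc=True,    # block WebRTC entirely
    allow_webgl=False,    # the option added by this commit
    network_idle=False,
    timeout=30000,        # milliseconds, used for every wait on the page
)
print(response.status, response.reason, response.encoding)
parser = response.adaptor  # an Adaptor built from the body, or None if it is empty
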
scrapling.engines.camo/CamoufoxEngine.__init__
|
Modified
|
D4Vinci~Scrapling
|
b32e1203f21f31d5eb172dd7ded1a39567723d98
|
StealthyFetcher - The option to drop unnecessary resources requests for speed boost
|
<2>:<add> self.disable_resources = bool(disable_resources)
|
<s> Optional[bool] = False,
+ disable_resources: Optional[bool] = False,
block_webrtc: Optional[bool] = False,
allow_webgl: Optional[bool] = False,
network_idle: Optional[bool] = False,
timeout: Optional[float] = 30000,
page_action: Callable = do_nothing,
wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached',
adaptor_arguments: Dict = None
):
<0> self.headless = headless
<1> self.block_images = bool(block_images)
<2> self.block_webrtc = bool(block_webrtc)
<3> self.allow_webgl = bool(allow_webgl)
<4> self.network_idle = bool(network_idle)
<5> self.timeout = check_type_validity(timeout, [int, float], 30000)
<6> if callable(page_action):
<7> self.page_action = page_action
<8> else:
<9> self.page_action = do_nothing
<10> logging.error('[Ignored] Argument "page_action" must be callable')
<11>
<12> self.wait_selector = wait_selector
<13> self.wait_selector_state = wait_selector_state
<14> self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
<15>
|
===========unchanged ref 0===========
at: logging
error(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: scrapling.engines.toolbelt.custom
check_type_validity(variable: Any, valid_types: Union[List[Type], None], default_value: Any=None, critical: bool=False, param_name: Optional[str]=None) -> Any
do_nothing(page)
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
Dict = _alias(dict, 2, inst=False, name='Dict')
|
scrapling.engines.camo/CamoufoxEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
b32e1203f21f31d5eb172dd7ded1a39567723d98
|
StealthyFetcher - The option to drop unnecessary resources requests for speed boost
|
<10>:<add> if self.disable_resources:
<add> page.route("**/*", intercept_route)
<add>
|
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
<0> with Camoufox(
<1> headless=self.headless,
<2> block_images=self.block_images, # Careful! It makes some websites never finish loading at all, like StackOverflow, even in headful mode
<3> os=get_os_name(),
<4> block_webrtc=self.block_webrtc,
<5> allow_webgl=self.allow_webgl,
<6> ) as browser:
<7> page = browser.new_page()
<8> page.set_default_navigation_timeout(self.timeout)
<9> page.set_default_timeout(self.timeout)
<10> res = page.goto(url, referer=generate_convincing_referer(url))
<11> page.wait_for_load_state(state="load")
<12> page.wait_for_load_state(state="domcontentloaded")
<13> if self.network_idle:
<14> page.wait_for_load_state('networkidle')
<15>
<16> page = self.page_action(page)
<17>
<18> if self.wait_selector and type(self.wait_selector) is str:
<19> waiter = page.locator(self.wait_selector)
<20> waiter.wait_for(state=self.wait_selector_state)
<21>
<22> content_type = res.headers.get('content-type', '')
<23> # Parse charset from content-type
<24> encoding = 'utf-8' # default encoding
<25> if 'charset=' in content_type.lower():
<26> encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
<27>
<28> response = Response(
<29> url=res.url,
<30> text=res.text(),
<31> content=res.body(),
<32> status=res.status,
<33> reason=res.status_text,
<34> encoding=encoding,
<35> cookies={cookie['name']: cookie['value'] for cookie in page.context.</s>
|
===========below chunk 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.camo.CamoufoxEngine.__init__
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.timeout = check_type_validity(timeout, [int, float], 30000)
self.page_action = do_nothing
self.page_action = page_action
self.wait_selector = wait_selector
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
at: scrapling.engines.toolbelt.custom.Response
url: str
text: str
content: bytes
status: int
reason: str
encoding: str = 'utf-8' # default encoding
cookies: Dict = field(default_factory=dict)
headers: Dict = field(default_factory=dict)
request_headers: Dict = field(default_factory=dict)
adaptor_arguments: Dict = field(default_factory=dict)
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url)
get_os_name()
at: scrapling.engines.toolbelt.navigation
intercept_route(route: Route)
===========changed ref 0===========
<s> Optional[bool] = False,
+ disable_resources: Optional[bool] = False,
block_webrtc: Optional[bool] = False,
allow_webgl: Optional[bool] = False,
network_idle: Optional[bool] = False,
timeout: Optional[float] = 30000,
page_action: Callable = do_nothing,
wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached',
adaptor_arguments: Dict = None
):
self.headless = headless
self.block_images = bool(block_images)
+ self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.timeout = check_type_validity(timeout, [int, float], 30000)
if callable(page_action):
self.page_action = page_action
else:
self.page_action = do_nothing
logging.error('[Ignored] Argument "page_action" must be callable')
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
|
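
`intercept_route` itself is not shown in this dump, only referenced from `scrapling.engines.toolbelt.navigation`. A plausible minimal version, based on the resource types listed in the `disable_resources` docstrings further down and on Playwright's routing API; this is an assumption, not the library's actual code.

from playwright.sync_api import Route, sync_playwright

# Resource types named in the disable_resources docstring.
DROPPED_TYPES = {
    "font", "image", "media", "beacon", "object", "imageset",
    "texttrack", "websocket", "csp_report", "stylesheet",
}


def intercept_route(route: Route) -> None:
    """Abort requests for unnecessary resources, pass everything else through."""
    if route.request.resource_type in DROPPED_TYPES:
        route.abort()
    else:
        route.continue_()


with sync_playwright() as p:
    browser = p.chromium.launch(headless=True)
    page = browser.new_page()
    page.route("**/*", intercept_route)   # the same hook the engine installs
    page.goto("https://example.com")
    browser.close()
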
scrapling.engines.pw/PlaywrightEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
711de8110637cce88d7832b2f1b5bbf526dcb1a0
|
PlaywrightFetcher - The option to drop unnecessary resources requests for speed boost
|
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url) -> Response:
<0> if not self.stealth:
<1> from playwright.sync_api import sync_playwright
<2> else:
<3> from rebrowser_playwright.sync_api import sync_playwright
<4>
<5> with sync_playwright() as p:
<6> # Handle the UserAgent early
<7> if self.useragent:
<8> extra_headers = {}
<9> useragent = self.useragent
<10> else:
<11> extra_headers = generate_headers(browser_mode=True)
<12> useragent = extra_headers.get('User-Agent')
<13>
<14> # Prepare the flags before diving
<15> flags = DEFAULT_STEALTH_FLAGS
<16> if self.hide_canvas:
<17> flags += ['--fingerprinting-canvas-image-data-noise']
<18> if self.disable_webgl:
<19> flags += ['--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2']
<20>
<21> # Creating the browser
<22> if self.cdp_url:
<23> cdp_url = self._cdp_url_logic(flags if self.stealth else None)
<24> browser = p.chromium.connect_over_cdp(endpoint_url=cdp_url)
<25> else:
<26> if self.stealth:
<27> browser = p.chromium.launch(headless=self.headless, args=flags, ignore_default_args=['--enable-automation'], chromium_sandbox=True)
<28> else:
<29> browser = p.chromium.launch(headless=self.headless, ignore_default_args=['--enable-automation'])
<30>
<31> # Creating the context
<32> if self.stealth:
<33> context = browser.new_context(
<34> locale='en-US',
<35> is_mobile=False,
<36> has_touch=False,
<37> color_scheme='dark', # Bypasses the 'prefersLightColor</s>
|
===========below chunk 0===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url) -> Response:
# offset: 1
user_agent=useragent,
device_scale_factor=2,
# I'm thinking about disabling it to rest from all Service Workers headache but let's keep it as it is for now
service_workers="allow",
ignore_https_errors=True,
extra_http_headers=extra_headers,
screen={"width": 1920, "height": 1080},
viewport={"width": 1920, "height": 1080},
permissions=["geolocation", 'notifications'],
)
else:
context = browser.new_context(
color_scheme='dark',
user_agent=useragent,
device_scale_factor=2,
extra_http_headers=extra_headers
)
# Finally we are in business
page = context.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.stealth:
# Basic bypasses nothing fancy as I'm still working on it
# But with adding these bypasses to the above config, it bypasses many online tests like
# https://bot.sannysoft.com/
# https://kaliiiiiiiiii.github.io/brotector/
# https://pixelscan.net/
# https://iphey.com/
# https://www.browserscan.net/bot-detection <== this one also checks for the CDP runtime fingerprint
# https://arh.antoinevastel.com/bots/areyouheadless/
# https://prescience-data.github.io/execution-monitor.html
page.add_init_script(path=js_bypass_path('webdriver_fully.js'))
page.add_init_script(path=js_bypass_path('window_chrome.js'))
page.add_init_script(path=js_bypass_path('navigator_plugins</s>
===========below chunk 1===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url) -> Response:
# offset: 2
<s>('window_chrome.js'))
page.add_init_script(path=js_bypass_path('navigator_plugins.js'))
page.add_init_script(path=js_bypass_path('pdf_viewer.js'))
page.add_init_script(path=js_bypass_path('notification_permission.js'))
page.add_init_script(path=js_bypass_path('screen_props.js'))
page.add_init_script(path=js_bypass_path('playwright_fingerprint.js'))
res = page.goto(url, referer=generate_convincing_referer(url) if self.stealth else None)
page.wait_for_load_state(state="load")
page.wait_for_load_state(state="domcontentloaded")
if self.network_idle:
page.wait_for_load_state('networkidle')
page = self.page_action(page)
if self.wait_selector and type(self.wait_selector) is str:
waiter = page.locator(self.wait_selector)
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=res.text(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context</s>
===========below chunk 2===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url) -> Response:
# offset: 3
<s>()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.constants
DEFAULT_STEALTH_FLAGS = [
# Explanation: https://peter.sh/experiments/chromium-command-line-switches/
# Generally this will make the browser faster and less detectable
'--no-pings',
'--incognito',
'--test-type',
'--lang=en-US',
'--mute-audio',
'--no-first-run',
'--disable-sync',
'--hide-scrollbars',
'--disable-logging',
'--start-maximized', # For headless check bypass
'--enable-async-dns',
'--disable-breakpad',
'--disable-infobars',
'--accept-lang=en-US',
'--use-mock-keychain',
'--disable-translate',
'--disable-extensions',
'--disable-voice-input',
'--window-position=0,0',
'--disable-wake-on-wifi',
'--ignore-gpu-blocklist',
'--enable-tcp-fast-open',
'--enable-web-bluetooth',
'--disable-hang-monitor',
'--password-store=basic',
'--disable-cloud-import',
'--disable-default-apps',
'--disable-print-preview',
'--disable-dev-shm-usage',
'--disable-popup-blocking',
'--metrics-recording-only',
'--disable-crash-reporter',
'--disable-partial-raster',
'--disable-gesture-typing',
'--disable-checker-imaging',
'--disable-prompt-on-repost',
'--force-color-profile=srgb',
'--font-render-hinting=none',
'--no-default-browser-check',
'--aggressive-cache-discard',
'--disable-component-update',
'--disable-cookie-encryption',
'--disable-domain-reliability',
'--disable-threaded-animation',
'--disable-threaded-scrolling',
# '--disable-reading-from-canvas',</s>
===========unchanged ref 1===========
at: scrapling.engines.pw.PlaywrightEngine
_cdp_url_logic(flags: Optional[dict]=None)
at: scrapling.engines.pw.PlaywrightEngine.__init__
self.headless = headless
self.stealth = bool(stealth)
self.hide_canvas = bool(hide_canvas)
self.disable_webgl = bool(disable_webgl)
self.cdp_url = cdp_url
self.useragent = useragent
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url)
generate_headers(browser_mode=False)
at: scrapling.engines.toolbelt.navigation
intercept_route(route: Route)
js_bypass_path(filename)
|
|
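
The fetch method above stitches together launch flags, an optional CDP connection, and a stack of JS bypass scripts. A stripped-down sketch of just the launch-and-context part with plain Playwright; the flag values come from the snippets above, while the commented script path is a placeholder, not a real file.

from playwright.sync_api import sync_playwright

EXTRA_FLAGS = [
    "--no-pings",
    "--mute-audio",
    "--disable-webgl",                           # added when disable_webgl is set
    "--fingerprinting-canvas-image-data-noise",  # added when hide_canvas is set
]

with sync_playwright() as p:
    browser = p.chromium.launch(
        headless=True,
        args=EXTRA_FLAGS,
        ignore_default_args=["--enable-automation"],  # hide the automation banner
    )
    context = browser.new_context(
        locale="en-US",
        color_scheme="dark",
        viewport={"width": 1920, "height": 1080},
    )
    page = context.new_page()
    # The engine registers several bypass scripts here, e.g.:
    # page.add_init_script(path="js/webdriver_fully.js")  # placeholder path
    page.goto("https://example.com")
    print(page.title())
    browser.close()
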
scrapling.fetcher/StealthyFetcher.fetch
|
Modified
|
D4Vinci~Scrapling
|
3556d11fb2f9a8a23c9f6fbb5c4db5e97d408a49
|
StealthyFetcher - Add `disable_resources` option and its explanation
|
<5>:<add> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<del> This can help save your proxy usage but careful with this option as it makes some websites never finish loading.
<6>:<add> :param disable_resources: Drop requests to unnecessary resources for speed boost.
<add> Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
<add> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
|
<s> False, network_idle: Optional[bool] = False,
- allow_webgl: Optional[bool] = False,
+ timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None,
- network_idle: Optional[bool] = False, timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached',
) -> Response:
<0> """
<1> Opens up a browser and do your request based on your chosen options below.
<2> :param url: Target url.
<3> :param headless: Run the browser in headless/hidden (default), virtual screen mode, or headful/visible mode.
<4> :param block_images: Prevent the loading of images through Firefox preferences.
<5> This can help save your proxy usage but careful with this option as it makes some websites never finish loading.
<6> :param block_webrtc: Blocks WebRTC entirely.
<7> :param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
<8> :param network_idle: Wait for the page to not do any requests.
<9> :param timeout: The timeout in milliseconds that's used in all operations and waits through the page. Default is 30000.
<10> :param page_action: Added for automation. A function that takes the `page` object, do the automation you need, then return `page` again.
<11> :param wait_selector: Wait for a specific css selector to be in a specific state.
<12> :param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
<13> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<14> """
<15> engine = CamoufoxEngine(
<16> timeout=timeout,
<17> headless=headless,
<18> page_action=page_action,
<19> </s>
|
===========below chunk 0===========
<s>: Optional[bool] = False,
- allow_webgl: Optional[bool] = False,
+ timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None,
- network_idle: Optional[bool] = False, timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached',
) -> Response:
# offset: 1
block_webrtc=block_webrtc,
allow_webgl=allow_webgl,
network_idle=network_idle,
wait_selector=wait_selector,
wait_selector_state=wait_selector_state,
adaptor_arguments=self.adaptor_arguments,
)
return engine.fetch(url)
===========unchanged ref 0===========
at: scrapling.engines.camo
CamoufoxEngine(headless: Union[bool, str]=True, block_images: Optional[bool]=False, disable_resources: Optional[bool]=False, block_webrtc: Optional[bool]=False, allow_webgl: Optional[bool]=False, network_idle: Optional[bool]=False, timeout: Optional[float]=30000, page_action: Callable=do_nothing, wait_selector: Optional[str]=None, wait_selector_state: str='attached', adaptor_arguments: Dict=None)
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
do_nothing(page)
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
|
scrapling.engines.pw/PlaywrightEngine._cdp_url_logic
|
Modified
|
D4Vinci~Scrapling
|
2ff0fbb0e554eb52148d687934eba83e7c523727
|
Engines utils - Navigation functions
|
<20>:<add> cdp_url = construct_cdp_url(cdp_url, config)
<del> cdp_url = construct_websocket_url(cdp_url, config)
|
# module: scrapling.engines.pw
class PlaywrightEngine:
def _cdp_url_logic(self, flags: Optional[List] = None) -> str:
<0> """Constructs new CDP URL if NSTBrowser is enabled otherwise return CDP URL as it is
<1>
<2> :param flags: Chrome flags to be added to NSTBrowser query
<3> :return: CDP URL
<4> """
<5> cdp_url = self.cdp_url
<6> if self.nstbrowser_mode:
<7> if self.nstbrowser_config and type(self.nstbrowser_config) is Dict:
<8> config = self.nstbrowser_config
<9> else:
<10> query = NSTBROWSER_DEFAULT_QUERY.copy()
<11> if flags:
<12> query.update({
<13> "args": dict(zip(flags, [''] * len(flags))), # browser args should be a dictionary
<14> })
<15>
<16> config = {
<17> 'config': json.dumps(query),
<18> # 'token': ''
<19> }
<20> cdp_url = construct_websocket_url(cdp_url, config)
<21>
<22> return cdp_url
<23>
|
===========unchanged ref 0===========
at: json
dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str
at: scrapling.engines.constants
NSTBROWSER_DEFAULT_QUERY = {
"once": True,
"headless": True,
"autoClose": True,
"fingerprint": {
"flags": {
"timezone": "BasedOnIp",
"screen": "Custom"
},
"platform": 'linux', # support: windows, mac, linux
"kernel": 'chromium', # only support: chromium
"kernelMilestone": '128',
"hardwareConcurrency": 8,
"deviceMemory": 8,
},
}
at: scrapling.engines.pw.PlaywrightEngine.__init__
self.cdp_url = cdp_url
self.nstbrowser_mode = bool(nstbrowser_mode)
self.nstbrowser_config = nstbrowser_config
at: typing
List = _alias(list, 1, inst=False, name='List')
Dict = _alias(dict, 2, inst=False, name='Dict')
|
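
`construct_cdp_url` is only referenced here; its job is to append the NSTBrowser config as query parameters to the CDP endpoint (and, per a later commit in this dump, to validate the URL). A rough standard-library equivalent, with `build_cdp_url` as an illustrative stand-in name:

import json
from urllib.parse import urlencode, urlparse, urlunparse


def build_cdp_url(cdp_url: str, query_params: dict) -> str:
    """Append query params to a CDP endpoint and sanity-check the scheme."""
    parts = urlparse(cdp_url)
    if parts.scheme not in ("ws", "wss", "http", "https"):
        raise ValueError(f"Invalid CDP URL: {cdp_url!r}")
    return urlunparse(parts._replace(query=urlencode(query_params)))


config = {"config": json.dumps({"once": True, "headless": True, "autoClose": True})}
print(build_cdp_url("ws://localhost:8848/devtool/launch", config))
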
scrapling.engines.toolbelt.fingerprints/generate_suitable_fingerprint
|
Modified
|
D4Vinci~Scrapling
|
544d23b696f4864e3532bfbbc0482dc3b6b6c1c1
|
Toolbelt fingerprint functions - Doc strings and type annotations stuff
|
<0>:<add> """Generates a browserforge's fingerprint that matches current OS, desktop device, and Chrome with version 128 at least.
<add>
<add> This function was originally created to test Browserforge's injector.
<add> :return: `Fingerprint` object
<add> """
<del> # This would be for Browserforge playwright injector
<1>:<del> os_name = get_os_name()
<4>:<add> os=get_os_name(), # None is ignored
<del> os=os_name, # None is ignored
|
# module: scrapling.engines.toolbelt.fingerprints
+ def generate_suitable_fingerprint() -> Fingerprint:
- def generate_suitable_fingerprint():
<0> # This would be for Browserforge playwright injector
<1> os_name = get_os_name()
<2> return FingerprintGenerator(
<3> browser=[Browser(name='chrome', min_version=128)],
<4> os=os_name, # None is ignored
<5> device='desktop'
<6> ).generate()
<7>
|
===========unchanged ref 0===========
at: scrapling.engines.toolbelt.fingerprints.get_os_name
os_name = platform.system()
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
|
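
`get_os_name()` is called throughout but its body never appears in this dump; the context only shows that it wraps `platform.system()` and that `None` is ignored by the generator. A guess at a minimal equivalent, where the mapping values ('linux', 'macos', 'windows') are an assumption about what the fingerprint generator and Camoufox accept:

import platform
from typing import Optional

# Assumed mapping from platform.system() to the names the generator expects.
_OS_MAP = {"Linux": "linux", "Darwin": "macos", "Windows": "windows"}


def get_os_name_sketch() -> Optional[str]:
    # None for unknown platforms; the generator treats None as "pick for me".
    return _OS_MAP.get(platform.system())


print(get_os_name_sketch())
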
scrapling.engines.camo/CamoufoxEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
a60662af4a2a20f29adf1912df1a7e12ddb92f74
|
Update camo.py
|
<11>:<add> i_know_what_im_doing=True, # To turn warnings off with user configurations
|
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<4> """
<5> with Camoufox(
<6> headless=self.headless,
<7> block_images=self.block_images, # Careful! It makes some websites never finish loading at all, like StackOverflow, even in headful mode
<8> os=get_os_name(),
<9> block_webrtc=self.block_webrtc,
<10> allow_webgl=self.allow_webgl,
<11> ) as browser:
<12> page = browser.new_page()
<13> page.set_default_navigation_timeout(self.timeout)
<14> page.set_default_timeout(self.timeout)
<15> if self.disable_resources:
<16> page.route("**/*", intercept_route)
<17>
<18> res = page.goto(url, referer=generate_convincing_referer(url))
<19> page.wait_for_load_state(state="load")
<20> page.wait_for_load_state(state="domcontentloaded")
<21> if self.network_idle:
<22> page.wait_for_load_state('networkidle')
<23>
<24> page = self.page_action(page)
<25>
<26> if self.wait_selector and type(self.wait_selector) is str:
<27> waiter = page.locator(self.wait_selector)
<28> waiter.wait_for(state=self.wait_selector_state)
<29>
<30> content_type = res.headers.get('content-type', '')
<31> # Parse charset from content-type
<32> encoding = 'utf-8' # default encoding
<33> if 'charset=' in content_type.lower</s>
|
===========below chunk 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=res.text(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.camo.CamoufoxEngine.__init__
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.timeout = check_type_validity(timeout, [int, float], 30000)
self.page_action = do_nothing
self.page_action = page_action
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
at: scrapling.engines.toolbelt.custom.Response
url: str
text: str
content: bytes
status: int
reason: str
encoding: str = 'utf-8' # default encoding
cookies: Dict = field(default_factory=dict)
headers: Dict = field(default_factory=dict)
request_headers: Dict = field(default_factory=dict)
adaptor_arguments: Dict = field(default_factory=dict)
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url: str) -> str
get_os_name() -> Union[str, None]
at: scrapling.engines.toolbelt.navigation
intercept_route(route: Route) -> Union[Route, None]
|
scrapling.parser/Adaptors.re_first
|
Modified
|
D4Vinci~Scrapling
|
009c2914aa323091a58b9a0e1f8b8b18e2f60f25
|
Fixing `re_first` logic in `Adaptors` type
|
<1>:<add> the first result or the default value otherwise.
<del> their results flattened as List of TextHandler.
<8>:<add> for n in self:
<add> result = n.re_first(regex, None, replace_entities)
<add> if result:
<add> return result
<del> results = [
<9>:<del> n.text.re_first(regex, default, replace_entities) for n in self
<10>:<del> ]
<11>:<add> return default
<del> return flatten(results)
|
# module: scrapling.parser
class Adaptors(List[Adaptor]):
def re_first(self, regex: Union[str, Pattern[str]], default=None, replace_entities: bool = True):
<0> """Call the ``.re_first()`` method for each element in this list and return
<1> their results flattened as List of TextHandler.
<2>
<3> :param regex: Can be either a compiled regular expression or a string.
<4> :param default: The default value to be returned if there is no match
<5> :param replace_entities: if enabled character entity references are replaced by their corresponding character
<6>
<7> """
<8> results = [
<9> n.text.re_first(regex, default, replace_entities) for n in self
<10> ]
<11> return flatten(results)
<12>
|
===========unchanged ref 0===========
at: scrapling.parser.Adaptor
__slots__ = (
'url', 'encoding', '__auto_match_enabled', '_root', '_storage', '__debug',
'__keep_comments', '__huge_tree_enabled', '__attributes', '__text', '__tag',
)
body = html_content
re_first(regex: Union[str, Pattern[str]], default=None, replace_entities: bool=True)
at: scrapling.parser.Adaptors
__slots__ = ()
at: typing
Pattern = _alias(stdlib_re.Pattern, 1)
|
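
The fix above changes `Adaptors.re_first` from "flatten every element's result" to "return the first element's match, else the default". The same semantics on plain data, as a quick illustration (simplified: the real method also handles entity replacement):

import re
from typing import List, Optional, Pattern, Union


def re_first(texts: List[str], regex: Union[str, Pattern[str]], default=None) -> Optional[str]:
    """Return the first match found across all items, or `default` if none match."""
    for text in texts:
        match = re.search(regex, text)
        if match:
            return match.group(0)
    return default


nodes = ["no digits here", "price: 42 USD", "price: 77 USD"]
print(re_first(nodes, r"\d+"))              # "42" -- first hit wins, later ones ignored
print(re_first(nodes, r"[A-Z]{5}", "n/a"))  # "n/a" -- default when nothing matches
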
scrapling.engines.pw/PlaywrightEngine._cdp_url_logic
|
Modified
|
D4Vinci~Scrapling
|
6cf506cb22aa84bc8f80b3e21bf28482130df281
|
PlaywrightEngine - Validate CDP URLs in all cases
|
<21>:<add> else:
<add> # To validate it
<add> cdp_url = construct_cdp_url(cdp_url)
|
# module: scrapling.engines.pw
class PlaywrightEngine:
def _cdp_url_logic(self, flags: Optional[List] = None) -> str:
<0> """Constructs new CDP URL if NSTBrowser is enabled otherwise return CDP URL as it is
<1>
<2> :param flags: Chrome flags to be added to NSTBrowser query
<3> :return: CDP URL
<4> """
<5> cdp_url = self.cdp_url
<6> if self.nstbrowser_mode:
<7> if self.nstbrowser_config and type(self.nstbrowser_config) is Dict:
<8> config = self.nstbrowser_config
<9> else:
<10> query = NSTBROWSER_DEFAULT_QUERY.copy()
<11> if flags:
<12> query.update({
<13> "args": dict(zip(flags, [''] * len(flags))), # browser args should be a dictionary
<14> })
<15>
<16> config = {
<17> 'config': json.dumps(query),
<18> # 'token': ''
<19> }
<20> cdp_url = construct_cdp_url(cdp_url, config)
<21>
<22> return cdp_url
<23>
|
===========unchanged ref 0===========
at: json
dumps(obj: Any, *, skipkeys: bool=..., ensure_ascii: bool=..., check_circular: bool=..., allow_nan: bool=..., cls: Optional[Type[JSONEncoder]]=..., indent: Union[None, int, str]=..., separators: Optional[Tuple[str, str]]=..., default: Optional[Callable[[Any], Any]]=..., sort_keys: bool=..., **kwds: Any) -> str
at: scrapling.engines.constants
NSTBROWSER_DEFAULT_QUERY = {
"once": True,
"headless": True,
"autoClose": True,
"fingerprint": {
"flags": {
"timezone": "BasedOnIp",
"screen": "Custom"
},
"platform": 'linux', # support: windows, mac, linux
"kernel": 'chromium', # only support: chromium
"kernelMilestone": '128',
"hardwareConcurrency": 8,
"deviceMemory": 8,
},
}
at: scrapling.engines.pw.PlaywrightEngine.__init__
self.cdp_url = cdp_url
self.nstbrowser_mode = bool(nstbrowser_mode)
self.nstbrowser_config = nstbrowser_config
at: scrapling.engines.toolbelt.navigation
construct_cdp_url(cdp_url: str, query_params: Dict) -> str
at: typing
List = _alias(list, 1, inst=False, name='List')
Dict = _alias(dict, 2, inst=False, name='Dict')
|
scrapling.engines.camo/CamoufoxEngine.__init__
|
Modified
|
D4Vinci~Scrapling
|
a0a3e62075b1a349525cdf350a5f6de379feb85d
|
Adding `addons` and `humanize` arguments to StealthyFetcher
|
<9>:<add> :param addons: List of Firefox addons to use. Must be paths to extracted addons.
<add> :param humanize: Humanize the cursor movement. Takes either True, or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
|
<s> allow_webgl: Optional[bool] = False, network_idle: Optional[bool] = False,
+ timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, addons: Optional[List[str]] = None,
- timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached', adaptor_arguments: Dict = None
):
<0> """An engine that utilizes Camoufox library, check the `StealthyFetcher` class for more documentation.
<1>
<2> :param headless: Run the browser in headless/hidden (default), virtual screen mode, or headful/visible mode.
<3> :param block_images: Prevent the loading of images through Firefox preferences.
<4> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<5> :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<6> Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
<7> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<8> :param block_webrtc: Blocks WebRTC entirely.
<9> :param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
<10> :param network_idle: Wait for the page to not do any requests.
<11> :param timeout: The timeout in milliseconds that's used in all operations and waits through the page. Default is 30000.
<12> :param page_action: Added for automation. A function that takes the `page` object, do the automation you need, then return `page` again.
<13> :param wait_selector: Wait for a specific css selector to be in a specific state.
<14> :param wait_selector_state: The state to wait for the selector given with `wait</s>
|
===========below chunk 0===========
<s> Optional[bool] = False, network_idle: Optional[bool] = False,
+ timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, addons: Optional[List[str]] = None,
- timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached', adaptor_arguments: Dict = None
):
# offset: 1
:param adaptor_arguments: The arguments that will be passed in the end while creating the final Adaptor's class.
"""
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.timeout = check_type_validity(timeout, [int, float], 30000)
if callable(page_action):
self.page_action = page_action
else:
self.page_action = do_nothing
logging.error('[Ignored] Argument "page_action" must be callable')
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
===========unchanged ref 0===========
at: logging
error(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: scrapling.engines.toolbelt.custom
check_type_validity(variable: Any, valid_types: Union[List[Type], None], default_value: Any=None, critical: bool=False, param_name: Optional[str]=None) -> Any
do_nothing(page)
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
Dict = _alias(dict, 2, inst=False, name='Dict')
|
scrapling.engines.camo/CamoufoxEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
a0a3e62075b1a349525cdf350a5f6de379feb85d
|
Adding `addons` and `humanize` arguments to StealthyFetcher
|
<11>:<add> addons=self.addons,
<add> humanize=self.humanize,
|
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<4> """
<5> with Camoufox(
<6> headless=self.headless,
<7> block_images=self.block_images, # Careful! It makes some websites never finish loading at all, like StackOverflow, even in headful mode
<8> os=get_os_name(),
<9> block_webrtc=self.block_webrtc,
<10> allow_webgl=self.allow_webgl,
<11> i_know_what_im_doing=True, # To turn warnings off with user configurations
<12> ) as browser:
<13> page = browser.new_page()
<14> page.set_default_navigation_timeout(self.timeout)
<15> page.set_default_timeout(self.timeout)
<16> if self.disable_resources:
<17> page.route("**/*", intercept_route)
<18>
<19> res = page.goto(url, referer=generate_convincing_referer(url))
<20> page.wait_for_load_state(state="load")
<21> page.wait_for_load_state(state="domcontentloaded")
<22> if self.network_idle:
<23> page.wait_for_load_state('networkidle')
<24>
<25> page = self.page_action(page)
<26>
<27> if self.wait_selector and type(self.wait_selector) is str:
<28> waiter = page.locator(self.wait_selector)
<29> waiter.wait_for(state=self.wait_selector_state)
<30>
<31> content_type = res.headers.get('content-type', '')
<32> # Parse charset from content-type
</s>
|
===========below chunk 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=res.text(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.camo.CamoufoxEngine.__init__
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.timeout = check_type_validity(timeout, [int, float], 30000)
self.page_action = do_nothing
self.page_action = page_action
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
at: scrapling.engines.toolbelt.custom.Response
url: str
text: str
content: bytes
status: int
reason: str
encoding: str = 'utf-8' # default encoding
cookies: Dict = field(default_factory=dict)
headers: Dict = field(default_factory=dict)
request_headers: Dict = field(default_factory=dict)
adaptor_arguments: Dict = field(default_factory=dict)
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url: str) -> str
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
===========unchanged ref 1===========
get_os_name() -> Union[str, None]
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: scrapling.engines.toolbelt.navigation
intercept_route(route: Route) -> Union[Route, None]
===========changed ref 0===========
<s> allow_webgl: Optional[bool] = False, network_idle: Optional[bool] = False,
+ timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, addons: Optional[List[str]] = None,
- timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached', adaptor_arguments: Dict = None
):
"""An engine that utilizes Camoufox library, check the `StealthyFetcher` class for more documentation.
:param headless: Run the browser in headless/hidden (default), virtual screen mode, or headful/visible mode.
:param block_images: Prevent the loading of images through Firefox preferences.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
:param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
:param block_webrtc: Blocks WebRTC entirely.
+ :param addons: List of Firefox addons to use. Must be paths to extracted addons.
+ :param humanize: Humanize the cursor movement. Takes either True, or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
:param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
:param network_idle: Wait for the page to not do any requests.
:param timeout: The timeout in milliseconds that's used in all operations and waits through the page. Default is 30000.
:param page_action: Added for automation. A function that takes the `page` object, do the automation you</s>
===========changed ref 1===========
<s> Optional[bool] = False, network_idle: Optional[bool] = False,
+ timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, addons: Optional[List[str]] = None,
- timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached', adaptor_arguments: Dict = None
):
# offset: 1
<s> is 30000.
:param page_action: Added for automation. A function that takes the `page` object, do the automation you need, then return `page` again.
:param wait_selector: Wait for a specific css selector to be in a specific state.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
:param adaptor_arguments: The arguments that will be passed in the end while creating the final Adaptor's class.
"""
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
+ self.addons = addons or []
+ self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
if callable(page_action):
self.page_action = page_action
else:
self.page_action = do_nothing
logging.error('[Ignored] Argument "page_action" must be callable')
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
|
scrapling.fetchers/StealthyFetcher.fetch
|
Modified
|
D4Vinci~Scrapling
|
a0a3e62075b1a349525cdf350a5f6de379feb85d
|
Adding `addons` and `humanize` arguments to StealthyFetcher
|
<10>:<add> :param addons: List of Firefox addons to use. Must be paths to extracted addons.
<add> :param humanize: Humanize the cursor movement. Takes either True, or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
|
<s> = False, allow_webgl: Optional[bool] = False, network_idle: Optional[bool] = False,
+ timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, humanize: Optional[Union[bool, float]] = True,
- timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached',
) -> Response:
<0> """
<1> Opens up a browser and do your request based on your chosen options below.
<2> :param url: Target url.
<3> :param headless: Run the browser in headless/hidden (default), virtual screen mode, or headful/visible mode.
<4> :param block_images: Prevent the loading of images through Firefox preferences.
<5> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<6> :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<7> Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
<8> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<9> :param block_webrtc: Blocks WebRTC entirely.
<10> :param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
<11> :param network_idle: Wait for the page to not do any requests.
<12> :param timeout: The timeout in milliseconds that's used in all operations and waits through the page. Default is 30000.
<13> :param page_action: Added for automation. A function that takes the `page` object, do the automation you need, then return `page` again.
<14> :param wait_selector: Wait for a specific css selector to be in a specific state.
<15> :param wait_selector_state: The state to wait for the selector given with</s>
|
===========below chunk 0===========
<s>webgl: Optional[bool] = False, network_idle: Optional[bool] = False,
+ timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, humanize: Optional[Union[bool, float]] = True,
- timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached',
) -> Response:
# offset: 1
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
engine = CamoufoxEngine(
timeout=timeout,
headless=headless,
page_action=page_action,
block_images=block_images,
block_webrtc=block_webrtc,
allow_webgl=allow_webgl,
disable_resources=disable_resources,
network_idle=network_idle,
wait_selector=wait_selector,
wait_selector_state=wait_selector_state,
adaptor_arguments=self.adaptor_arguments,
)
return engine.fetch(url)
===========unchanged ref 0===========
at: scrapling.engines.camo
CamoufoxEngine(headless: Union[bool, str]=True, block_images: Optional[bool]=False, disable_resources: Optional[bool]=False, block_webrtc: Optional[bool]=False, allow_webgl: Optional[bool]=False, network_idle: Optional[bool]=False, timeout: Optional[float]=30000, page_action: Callable=do_nothing, wait_selector: Optional[str]=None, wait_selector_state: str='attached', adaptor_arguments: Dict=None)
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
do_nothing(page)
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
List = _alias(list, 1, inst=False, name='List')
===========changed ref 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
"""Opens up the browser and do your request based on your chosen options.
:param url: Target url.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
with Camoufox(
headless=self.headless,
block_images=self.block_images, # Careful! it makes some websites not finish loading at all, like StackOverflow, even in headful mode
os=get_os_name(),
block_webrtc=self.block_webrtc,
allow_webgl=self.allow_webgl,
+ addons=self.addons,
+ humanize=self.humanize,
i_know_what_im_doing=True, # To turn warnings off with user configurations
) as browser:
page = browser.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.disable_resources:
page.route("**/*", intercept_route)
res = page.goto(url, referer=generate_convincing_referer(url))
page.wait_for_load_state(state="load")
page.wait_for_load_state(state="domcontentloaded")
if self.network_idle:
page.wait_for_load_state('networkidle')
page = self.page_action(page)
if self.wait_selector and type(self.wait_selector) is str:
waiter = page.locator(self.wait_selector)
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
</s>
===========changed ref 1===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
<s>get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=res.text(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========changed ref 2===========
<s> allow_webgl: Optional[bool] = False, network_idle: Optional[bool] = False,
+ timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, addons: Optional[List[str]] = None,
- timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None,
wait_selector_state: str = 'attached', adaptor_arguments: Dict = None
):
"""An engine that utilizes Camoufox library, check the `StealthyFetcher` class for more documentation.
:param headless: Run the browser in headless/hidden (default), virtual screen mode, or headful/visible mode.
:param block_images: Prevent the loading of images through Firefox preferences.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
:param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
:param block_webrtc: Blocks WebRTC entirely.
+ :param addons: List of Firefox addons to use. Must be paths to extracted addons.
+ :param humanize: Humanize the cursor movement. Takes either True, or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
:param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
:param network_idle: Wait for the page to not do any requests.
:param timeout: The timeout in milliseconds that's used in all operations and waits through the page. Default is 30000.
:param page_action: Added for automation. A function that takes the `page` object, do the automation you</s>
|
scrapling.parser/Adaptors.re_first
|
Modified
|
D4Vinci~Scrapling
|
7af75f78a5651951f5f3554926b564fc997858cd
|
Better logic for `Adaptors` `re_first` method
|
<9>:<add> for result in n.re(regex, replace_entities):
<del> result = n.re_first(regex, None, replace_entities)
<10>:<del> if result:
|
# module: scrapling.parser
class Adaptors(List[Adaptor]):
def re_first(self, regex: Union[str, Pattern[str]], default=None, replace_entities: bool = True):
<0> """Call the ``.re_first()`` method for each element in this list and return
<1> the first result or the default value otherwise.
<2>
<3> :param regex: Can be either a compiled regular expression or a string.
<4> :param default: The default value to be returned if there is no match
<5> :param replace_entities: if enabled character entity references are replaced by their corresponding character
<6>
<7> """
<8> for n in self:
<9> result = n.re_first(regex, None, replace_entities)
<10> if result:
<11> return result
<12> return default
<13>
|
===========unchanged ref 0===========
at: scrapling.parser.Adaptor
__slots__ = (
'url', 'encoding', '__auto_match_enabled', '_root', '_storage', '__debug',
'__keep_comments', '__huge_tree_enabled', '__attributes', '__text', '__tag',
)
body = html_content
re(regex: Union[str, Pattern[str]], replace_entities: bool=True) -> 'List[str]'
at: scrapling.parser.Adaptors
__slots__ = ()
at: typing
Pattern = _alias(stdlib_re.Pattern, 1)
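A minimal usage sketch of the updated `Adaptors.re_first` flow (the HTML snippet, selector, and pattern below are invented for illustration, and the top-level `Adaptor` import is assumed): instead of calling `re_first` per element and filtering by truthiness, the method now walks each element's `re()` matches and returns the very first one it sees, falling back to `default` only when nothing matched.

    from scrapling import Adaptor

    page = Adaptor('<div><p>out of stock</p><p>Price: $10</p></div>', auto_match=False)
    # `css()` returns an `Adaptors` list; the first <p> yields no match,
    # so the first match from the second element is returned.
    price = page.css('p').re_first(r'\$\d+', default='N/A')  # -> '$10'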
|
scrapling.parser/Adaptor.re
|
Modified
|
D4Vinci~Scrapling
|
7a84511b7a5f65bbf2f5b940ea05f5de977936db
|
Making `Adaptor` and `Adaptors` re/re_first arguments consistent with the TextHandler ones
|
<4>:<add> :param clean_match: if enabled, this will ignore all whitespaces and consecutive spaces while matching
<add> :param case_sensitive: if enabled, function will set the regex to ignore letters case while compiling it
<5>:<add> return self.text.re(regex, replace_entities, clean_match, case_sensitive)
<del> return self.text.re(regex, replace_entities)
|
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
+ def re(self, regex: Union[str, Pattern[str]], replace_entities: bool = True,
- def re(self, regex: Union[str, Pattern[str]], replace_entities: bool = True) -> 'List[str]':
+ clean_match: bool = False, case_sensitive: bool = False) -> 'List[str]':
<0> """Apply the given regex to the current text and return a list of strings with the matches.
<1>
<2> :param regex: Can be either a compiled regular expression or a string.
<3> :param replace_entities: if enabled character entity references are replaced by their corresponding character
<4> """
<5> return self.text.re(regex, replace_entities)
<6>
|
===========unchanged ref 0===========
at: scrapling.parser.Adaptor
__slots__ = (
'url', 'encoding', '__auto_match_enabled', '_root', '_storage', '__debug',
'__keep_comments', '__huge_tree_enabled', '__attributes', '__text', '__tag',
)
body = html_content
at: typing
Pattern = _alias(stdlib_re.Pattern, 1)
|
scrapling.parser/Adaptor.re_first
|
Modified
|
D4Vinci~Scrapling
|
7a84511b7a5f65bbf2f5b940ea05f5de977936db
|
Making `Adaptor` and `Adaptors` re/re_first arguments consistent with the TextHandler ones
|
<5>:<add> :param clean_match: if enabled, this will ignore all whitespaces and consecutive spaces while matching
<add> :param case_sensitive: if enabled, function will set the regex to ignore letters case while compiling it
<add> """
<add> return self.text.re_first(regex, default, replace_entities, clean_match, case_sensitive)
<6>:<del> """
<7>:<del> return self.text.re_first(regex, default, replace_entities)
<8>:<del>
|
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
+ def re_first(self, regex: Union[str, Pattern[str]], default=None, replace_entities: bool = True,
- def re_first(self, regex: Union[str, Pattern[str]], default=None, replace_entities: bool = True):
+ clean_match: bool = False, case_sensitive: bool = False) -> Union[str, None]:
<0> """Apply the given regex to text and return the first match if found, otherwise return the default value.
<1>
<2> :param regex: Can be either a compiled regular expression or a string.
<3> :param default: The default value to be returned if there is no match
<4> :param replace_entities: if enabled character entity references are replaced by their corresponding character
<5>
<6> """
<7> return self.text.re_first(regex, default, replace_entities)
<8>
|
===========unchanged ref 0===========
at: scrapling.core.custom_types.TextHandler
__slots__ = ()
re(regex: Union[str, Pattern[str]], replace_entities: bool=True, clean_match: bool=False, case_sensitive: bool=False, check_match: bool=False) -> Union[List[str], bool]
at: typing
Pattern = _alias(stdlib_re.Pattern, 1)
===========changed ref 0===========
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
+ def re(self, regex: Union[str, Pattern[str]], replace_entities: bool = True,
- def re(self, regex: Union[str, Pattern[str]], replace_entities: bool = True) -> 'List[str]':
+ clean_match: bool = False, case_sensitive: bool = False) -> 'List[str]':
"""Apply the given regex to the current text and return a list of strings with the matches.
:param regex: Can be either a compiled regular expression or a string.
:param replace_entities: if enabled character entity references are replaced by their corresponding character
+ :param clean_match: if enabled, this will ignore all whitespaces and consecutive spaces while matching
+ :param case_sensitive: if enabled, function will set the regex to ignore letters case while compiling it
"""
+ return self.text.re(regex, replace_entities, clean_match, case_sensitive)
- return self.text.re(regex, replace_entities)
|
scrapling.parser/Adaptors.re
|
Modified
|
D4Vinci~Scrapling
|
7a84511b7a5f65bbf2f5b940ea05f5de977936db
|
Making `Adaptor` and `Adaptors` re/re_first arguments consistent with the TextHandler ones
|
<5>:<add> :param clean_match: if enabled, this will ignore all whitespaces and consecutive spaces while matching
<add> :param case_sensitive: if enabled, function will set the regex to ignore letters case while compiling it
<7>:<add> n.text.re(regex, replace_entities, clean_match, case_sensitive) for n in self
<del> n.text.re(regex, replace_entities) for n in self
|
# module: scrapling.parser
class Adaptors(List[Adaptor]):
+ def re(self, regex: Union[str, Pattern[str]], replace_entities: bool = True,
- def re(self, regex: Union[str, Pattern[str]], replace_entities: bool = True) -> 'List[str]':
+ clean_match: bool = False, case_sensitive: bool = False) -> 'List[str]':
<0> """Call the ``.re()`` method for each element in this list and return
<1> their results flattened as List of TextHandler.
<2>
<3> :param regex: Can be either a compiled regular expression or a string.
<4> :param replace_entities: if enabled character entity references are replaced by their corresponding character
<5> """
<6> results = [
<7> n.text.re(regex, replace_entities) for n in self
<8> ]
<9> return flatten(results)
<10>
|
===========unchanged ref 0===========
at: scrapling.core.utils
flatten(lst: Iterable)
at: scrapling.parser.Adaptor
css(selector: str, identifier: str='', auto_match: bool=False, auto_save: bool=False, percentage: int=0) -> Union['Adaptors[Adaptor]', List]
at: scrapling.parser.Adaptors
__slots__ = ()
at: typing
Pattern = _alias(stdlib_re.Pattern, 1)
===========changed ref 0===========
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
+ def re(self, regex: Union[str, Pattern[str]], replace_entities: bool = True,
- def re(self, regex: Union[str, Pattern[str]], replace_entities: bool = True) -> 'List[str]':
+ clean_match: bool = False, case_sensitive: bool = False) -> 'List[str]':
"""Apply the given regex to the current text and return a list of strings with the matches.
:param regex: Can be either a compiled regular expression or a string.
:param replace_entities: if enabled character entity references are replaced by their corresponding character
+ :param clean_match: if enabled, this will ignore all whitespaces and consecutive spaces while matching
+ :param case_sensitive: if enabled, function will set the regex to ignore letters case while compiling it
"""
+ return self.text.re(regex, replace_entities, clean_match, case_sensitive)
- return self.text.re(regex, replace_entities)
===========changed ref 1===========
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
+ def re_first(self, regex: Union[str, Pattern[str]], default=None, replace_entities: bool = True,
- def re_first(self, regex: Union[str, Pattern[str]], default=None, replace_entities: bool = True):
+ clean_match: bool = False, case_sensitive: bool = False) -> Union[str, None]:
"""Apply the given regex to text and return the first match if found, otherwise return the default value.
:param regex: Can be either a compiled regular expression or a string.
:param default: The default value to be returned if there is no match
:param replace_entities: if enabled character entity references are replaced by their corresponding character
+ :param clean_match: if enabled, this will ignore all whitespaces and consecutive spaces while matching
+ :param case_sensitive: if enabled, function will set the regex to ignore letters case while compiling it
+ """
+ return self.text.re_first(regex, default, replace_entities, clean_match, case_sensitive)
- """
- return self.text.re_first(regex, default, replace_entities)
-
|
scrapling.parser/Adaptors.re_first
|
Modified
|
D4Vinci~Scrapling
|
7a84511b7a5f65bbf2f5b940ea05f5de977936db
|
Making `Adaptor` and `Adaptors` re/re_first arguments consistent with the TextHandler ones
|
<6>:<add> :param clean_match: if enabled, this will ignore all whitespaces and consecutive spaces while matching
<add> :param case_sensitive: if enabled, function will set the regex to ignore letters case while compiling it
<del>
<9>:<add> for result in n.re(regex, replace_entities, clean_match, case_sensitive):
<del> for result in n.re(regex, replace_entities):
|
# module: scrapling.parser
class Adaptors(List[Adaptor]):
+ def re_first(self, regex: Union[str, Pattern[str]], default=None, replace_entities: bool = True,
- def re_first(self, regex: Union[str, Pattern[str]], default=None, replace_entities: bool = True):
+ clean_match: bool = False, case_sensitive: bool = False) -> Union[str, None]:
<0> """Call the ``.re_first()`` method for each element in this list and return
<1> the first result or the default value otherwise.
<2>
<3> :param regex: Can be either a compiled regular expression or a string.
<4> :param default: The default value to be returned if there is no match
<5> :param replace_entities: if enabled character entity references are replaced by their corresponding character
<6>
<7> """
<8> for n in self:
<9> for result in n.re(regex, replace_entities):
<10> return result
<11> return default
<12>
|
===========unchanged ref 0===========
at: scrapling.core.custom_types.TextHandler
re(regex: Union[str, Pattern[str]], replace_entities: bool=True, clean_match: bool=False, case_sensitive: bool=False, check_match: bool=False) -> Union[List[str], bool]
at: scrapling.core.utils
flatten(lst: Iterable)
at: typing
Pattern = _alias(stdlib_re.Pattern, 1)
===========changed ref 0===========
# module: scrapling.parser
class Adaptors(List[Adaptor]):
+ def re(self, regex: Union[str, Pattern[str]], replace_entities: bool = True,
- def re(self, regex: Union[str, Pattern[str]], replace_entities: bool = True) -> 'List[str]':
+ clean_match: bool = False, case_sensitive: bool = False) -> 'List[str]':
"""Call the ``.re()`` method for each element in this list and return
their results flattened as List of TextHandler.
:param regex: Can be either a compiled regular expression or a string.
:param replace_entities: if enabled character entity references are replaced by their corresponding character
+ :param clean_match: if enabled, this will ignore all whitespaces and consecutive spaces while matching
+ :param case_sensitive: if enabled, function will set the regex to ignore letters case while compiling it
"""
results = [
+ n.text.re(regex, replace_entities, clean_match, case_sensitive) for n in self
- n.text.re(regex, replace_entities) for n in self
]
return flatten(results)
===========changed ref 1===========
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
+ def re(self, regex: Union[str, Pattern[str]], replace_entities: bool = True,
- def re(self, regex: Union[str, Pattern[str]], replace_entities: bool = True) -> 'List[str]':
+ clean_match: bool = False, case_sensitive: bool = False) -> 'List[str]':
"""Apply the given regex to the current text and return a list of strings with the matches.
:param regex: Can be either a compiled regular expression or a string.
:param replace_entities: if enabled character entity references are replaced by their corresponding character
+ :param clean_match: if enabled, this will ignore all whitespaces and consecutive spaces while matching
+ :param case_sensitive: if enabled, function will set the regex to ignore letters case while compiling it
"""
+ return self.text.re(regex, replace_entities, clean_match, case_sensitive)
- return self.text.re(regex, replace_entities)
===========changed ref 2===========
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
+ def re_first(self, regex: Union[str, Pattern[str]], default=None, replace_entities: bool = True,
- def re_first(self, regex: Union[str, Pattern[str]], default=None, replace_entities: bool = True):
+ clean_match: bool = False, case_sensitive: bool = False) -> Union[str, None]:
"""Apply the given regex to text and return the first match if found, otherwise return the default value.
:param regex: Can be either a compiled regular expression or a string.
:param default: The default value to be returned if there is no match
:param replace_entities: if enabled character entity references are replaced by their corresponding character
+ :param clean_match: if enabled, this will ignore all whitespaces and consecutive spaces while matching
+ :param case_sensitive: if enabled, function will set the regex to ignore letters case while compiling it
+ """
+ return self.text.re_first(regex, default, replace_entities, clean_match, case_sensitive)
- """
- return self.text.re_first(regex, default, replace_entities)
-
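A rough sketch of what the now-aligned signatures allow (markup and patterns are invented; the exact effect of `clean_match`/`case_sensitive` is delegated to the `TextHandler` implementation referenced above): the same keyword arguments can be passed at the single-element and element-list levels alike.

    from scrapling import Adaptor

    page = Adaptor('<p>Model:  ABC-123</p><p>model: xyz-999</p>', auto_match=False)
    # Single element (Adaptor.re / Adaptor.re_first)
    codes = page.css_first('p').re(r'[A-Za-z]+-\d+', clean_match=True)
    # Whole result list (Adaptors.re / Adaptors.re_first) takes the same keywords
    first_code = page.css('p').re_first(r'[A-Za-z]+-\d+', default=None, case_sensitive=True)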
|
scrapling.parser/Adaptor.__convert_results
|
Modified
|
D4Vinci~Scrapling
|
5e848ef0460e7a72d97782066763c39705827b39
|
Adding new class type `TextHandlers`
|
<13>:<add> elif all(isinstance(res, TextHandler) for res in results):
<add> return TextHandlers(results)
|
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
def __convert_results(
self, result: Union[List[html.HtmlElement], html.HtmlElement]
) -> Union['Adaptors[Adaptor]', 'Adaptor', List, None]:
<0> """Used internally in all functions to convert results to type (Adaptor|Adaptors) in bulk when possible"""
<1> if result is None:
<2> return None
<3> elif result == []: # Lxml will give a warning if I used something like `not result`
<4> return []
<5>
<6> if isinstance(result, Adaptors):
<7> return result
<8>
<9> if type(result) is list:
<10> results = [self.__get_correct_result(n) for n in result]
<11> if all(isinstance(res, self.__class__) for res in results):
<12> return Adaptors(results)
<13> return results
<14>
<15> return self.__get_correct_result(result)
<16>
|
===========unchanged ref 0===========
at: scrapling.core.custom_types
TextHandler(o: object=...)
TextHandler(o: bytes, encoding: str=..., errors: str=...)
TextHandlers()
TextHandlers(iterable: Iterable[_T])
at: scrapling.parser
Adaptors()
Adaptors(iterable: Iterable[_T])
at: scrapling.parser.Adaptor
__slots__ = (
'url', 'encoding', '__auto_match_enabled', '_root', '_storage', '__debug',
'__keep_comments', '__huge_tree_enabled', '__attributes', '__text', '__tag',
)
__get_correct_result(element: Union[html.HtmlElement, etree._ElementUnicodeResult]) -> Union[TextHandler, html.HtmlElement, 'Adaptor', str]
body = html_content
at: typing
List = _alias(list, 1, inst=False, name='List')
===========changed ref 0===========
# module: scrapling.core.custom_types
+ class TextHandlers(List[TextHandler]):
+ """
+ The :class:`TextHandlers` class is a subclass of the builtin ``List`` class, which provides a few additional methods.
+ """
+ __slots__ = ()
+
===========changed ref 1===========
# module: scrapling.core.custom_types
+ class TextHandlers(List[TextHandler]):
+ def __getitem__(self, pos: Union[SupportsIndex, slice]) -> Union[TextHandler, "TextHandlers[TextHandler]"]:
+ lst = super().__getitem__(pos)
+ if isinstance(pos, slice):
+ return self.__class__(lst)
+ else:
+ return lst
+
===========changed ref 2===========
# module: scrapling.core.custom_types
+ class TextHandlers(List[TextHandler]):
+ def re(self, regex: Union[str, Pattern[str]], replace_entities: bool = True, clean_match: bool = False,
+ case_sensitive: bool = False) -> 'List[str]':
+ """Call the ``.re()`` method for each element in this list and return
+ their results flattened as TextHandlers.
+
+ :param regex: Can be either a compiled regular expression or a string.
+ :param replace_entities: if enabled character entity references are replaced by their corresponding character
+ :param clean_match: if enabled, this will ignore all whitespaces and consecutive spaces while matching
+ :param case_sensitive: if enabled, function will set the regex to ignore letters case while compiling it
+ """
+ results = [
+ n.re(regex, replace_entities, clean_match, case_sensitive) for n in self
+ ]
+ return flatten(results)
+
===========changed ref 3===========
# module: scrapling.core.custom_types
+ class TextHandlers(List[TextHandler]):
+ def re_first(self, regex: Union[str, Pattern[str]], default=None, replace_entities: bool = True,
+ clean_match: bool = False, case_sensitive: bool = False) -> Union[str, None]:
+ """Call the ``.re_first()`` method for each element in this list and return
+ the first result or the default value otherwise.
+
+ :param regex: Can be either a compiled regular expression or a string.
+ :param default: The default value to be returned if there is no match
+ :param replace_entities: if enabled character entity references are replaced by their corresponding character
+ :param clean_match: if enabled, this will ignore all whitespaces and consecutive spaces while matching
+ :param case_sensitive: if enabled, function will set the regex to ignore letters case while compiling it
+ """
+ for n in self:
+ for result in n.re(regex, replace_entities, clean_match, case_sensitive):
+ return result
+ return default
+
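A short sketch of what the new wrapper enables (HTML and pattern are invented): text-only results, such as `::text` selections, now come back as `TextHandlers`, so `re`/`re_first` can be called on the whole list directly.

    from scrapling import Adaptor

    page = Adaptor('<span>a@x.com</span><span>b@y.com</span>', auto_match=False)
    texts = page.css('span::text')            # now a TextHandlers instance, not a plain list
    emails = texts.re(r'[\w.]+@[\w.]+')       # matches from every item, flattened
    first = texts.re_first(r'[\w.]+@[\w.]+')  # first match across the list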
|
scrapling.parser/Adaptor.css_first
|
Modified
|
D4Vinci~Scrapling
|
6b40af426fc90361c7d868b3f68a8206f8b5a538
|
Better logic for `css_first` and `xpath_first`
|
<17>:<del> try:
<18>:<add> for element in self.css(selector, identifier, auto_match, auto_save, percentage):
<del> return self.css(selector, identifier, auto_match, auto_save, percentage)[0]
<19>:<add> return element
<del> except (IndexError, TypeError,):
<20>:<add> return None
<del> return None
|
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
def css_first(self, selector: str, identifier: str = '',
auto_match: bool = False, auto_save: bool = False, percentage: int = 0
) -> Union['Adaptor', 'TextHandler', None]:
<0> """Search current tree with CSS3 selectors and return the first result if possible, otherwise return `None`
<1>
<2> **Important:
<3> It's recommended to use the identifier argument if you plan to use different selector later
<4> and want to relocate the same element(s)**
<5>
<6> :param selector: The CSS3 selector to be used.
<7> :param auto_match: Enabled will make function try to relocate the element if it was 'saved' before
<8> :param identifier: A string that will be used to save/retrieve element's data in auto-matching
<9> otherwise the selector will be used.
<10> :param auto_save: Automatically save new elements for `auto_match` later
<11> :param percentage: The minimum percentage to accept while auto-matching and not going lower than that.
<12> Be aware that the percentage calculation depends solely on the page structure so don't play with this
<13> number unless you must know what you are doing!
<14>
<15> :return: List as :class:`Adaptors`
<16> """
<17> try:
<18> return self.css(selector, identifier, auto_match, auto_save, percentage)[0]
<19> except (IndexError, TypeError,):
<20> return None
<21>
|
===========unchanged ref 0===========
at: scrapling.parser.Adaptor
__slots__ = (
'url', 'encoding', '__auto_match_enabled', '_root', '_storage', '__debug',
'__keep_comments', '__huge_tree_enabled', '__attributes', '__text', '__tag',
)
body = html_content
css(selector: str, identifier: str='', auto_match: bool=False, auto_save: bool=False, percentage: int=0) -> Union['Adaptors[Adaptor]', List]
css(self, selector: str, identifier: str='', auto_match: bool=False, auto_save: bool=False, percentage: int=0) -> Union['Adaptors[Adaptor]', List]
|
scrapling.parser/Adaptor.xpath_first
|
Modified
|
D4Vinci~Scrapling
|
6b40af426fc90361c7d868b3f68a8206f8b5a538
|
Better logic for `css_first` and `xpath_first`
|
<19>:<del> try:
<20>:<add> for element in self.xpath(selector, identifier, auto_match, auto_save, percentage, **kwargs):
<del> return self.xpath(selector, identifier, auto_match, auto_save, percentage, **kwargs)[0]
<21>:<add> return element
<del> except (IndexError, TypeError,):
<22>:<add> return None
<del> return None
|
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
def xpath_first(self, selector: str, identifier: str = '',
auto_match: bool = False, auto_save: bool = False, percentage: int = 0, **kwargs: Any
) -> Union['Adaptor', 'TextHandler', None]:
<0> """Search current tree with XPath selectors and return the first result if possible, otherwise return `None`
<1>
<2> **Important:
<3> It's recommended to use the identifier argument if you plan to use different selector later
<4> and want to relocate the same element(s)**
<5>
<6> Note: **Additional keyword arguments will be passed as XPath variables in the XPath expression!**
<7>
<8> :param selector: The XPath selector to be used.
<9> :param auto_match: Enabled will make function try to relocate the element if it was 'saved' before
<10> :param identifier: A string that will be used to save/retrieve element's data in auto-matching
<11> otherwise the selector will be used.
<12> :param auto_save: Automatically save new elements for `auto_match` later
<13> :param percentage: The minimum percentage to accept while auto-matching and not going lower than that.
<14> Be aware that the percentage calculation depends solely on the page structure so don't play with this
<15> number unless you must know what you are doing!
<16>
<17> :return: List as :class:`Adaptors`
<18> """
<19> try:
<20> return self.xpath(selector, identifier, auto_match, auto_save, percentage, **kwargs)[0]
<21> except (IndexError, TypeError,):
<22> return None
<23>
|
===========unchanged ref 0===========
at: scrapling.parser.Adaptor
xpath(selector: str, identifier: str='', auto_match: bool=False, auto_save: bool=False, percentage: int=0, **kwargs: Any) -> Union['Adaptors[Adaptor]', List]
===========changed ref 0===========
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
def css_first(self, selector: str, identifier: str = '',
auto_match: bool = False, auto_save: bool = False, percentage: int = 0
) -> Union['Adaptor', 'TextHandler', None]:
"""Search current tree with CSS3 selectors and return the first result if possible, otherwise return `None`
**Important:
It's recommended to use the identifier argument if you plan to use different selector later
and want to relocate the same element(s)**
:param selector: The CSS3 selector to be used.
:param auto_match: Enabled will make function try to relocate the element if it was 'saved' before
:param identifier: A string that will be used to save/retrieve element's data in auto-matching
otherwise the selector will be used.
:param auto_save: Automatically save new elements for `auto_match` later
:param percentage: The minimum percentage to accept while auto-matching and not going lower than that.
Be aware that the percentage calculation depends solely on the page structure so don't play with this
number unless you must know what you are doing!
:return: List as :class:`Adaptors`
"""
- try:
+ for element in self.css(selector, identifier, auto_match, auto_save, percentage):
- return self.css(selector, identifier, auto_match, auto_save, percentage)[0]
+ return element
- except (IndexError, TypeError,):
+ return None
- return None
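Usage sketch for the reworked first-match helpers (selector names and markup are invented): both now iterate the underlying result set and simply fall back to `None`, rather than indexing `[0]` and swallowing `IndexError`/`TypeError`.

    from scrapling import Adaptor

    page = Adaptor('<div class="product">Book</div>', auto_match=False)
    product = page.css_first('.product')             # first match
    missing = page.xpath_first('//div[@id="nope"]')  # no match -> None, no exception
    if missing is None:
        print('nothing found')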
|
tests.parser.test_general/TestParser.test_find_similar_elements
|
Modified
|
D4Vinci~Scrapling
|
846a185b4969fedc4b4fc7220ac493a8440c0830
|
Updating parser tests to use `find` and `css_first` functions
|
<5>:<add> first_review = self.page.find('div', class_='review')
<del> first_review = self.page.css('.review')[0]
|
# module: tests.parser.test_general
class TestParser(unittest.TestCase):
def test_find_similar_elements(self):
<0> """Test Finding similar elements of an element"""
<1> first_product = self.page.css_first('.product')
<2> similar_products = first_product.find_similar()
<3> self.assertEqual(len(similar_products), 2)
<4>
<5> first_review = self.page.css('.review')[0]
<6> similar_high_rated_reviews = [
<7> review
<8> for review in first_review.find_similar()
<9> if int(review.attrib.get('data-rating', 0)) >= 4
<10> ]
<11> self.assertEqual(len(similar_high_rated_reviews), 1)
<12>
| |
tests.parser.test_general/TestParser.test_element_navigation
|
Modified
|
D4Vinci~Scrapling
|
846a185b4969fedc4b4fc7220ac493a8440c0830
|
Updating parser tests to use `find` and `css_first` functions
|
<16>:<add> child = table.find({'data-id': "1"})
<del> child = table.css('[data-id="1"]')[0]
|
# module: tests.parser.test_general
class TestParser(unittest.TestCase):
def test_element_navigation(self):
<0> """Test moving in the page from selected element"""
<1> table = self.page.css('.product-list')[0]
<2>
<3> self.assertIsNot(table.path, [])
<4> self.assertNotEqual(table.html_content, '')
<5> self.assertNotEqual(table.prettify(), '')
<6>
<7> parent = table.parent
<8> self.assertEqual(parent.attrib['id'], 'products')
<9>
<10> children = table.children
<11> self.assertEqual(len(children), 3)
<12>
<13> parent_siblings = parent.siblings
<14> self.assertEqual(len(parent_siblings), 1)
<15>
<16> child = table.css('[data-id="1"]')[0]
<17> next_element = child.next
<18> self.assertEqual(next_element.attrib['data-id'], '2')
<19>
<20> prev_element = next_element.previous
<21> self.assertEqual(prev_element.tag, child.tag)
<22>
<23> all_prices = self.page.css('.price')
<24> products_with_prices = [
<25> price.find_ancestor(lambda p: p.has_class('product'))
<26> for price in all_prices
<27> ]
<28> self.assertEqual(len(products_with_prices), 3)
<29>
|
===========changed ref 0===========
# module: tests.parser.test_general
class TestParser(unittest.TestCase):
def test_find_similar_elements(self):
"""Test Finding similar elements of an element"""
first_product = self.page.css_first('.product')
similar_products = first_product.find_similar()
self.assertEqual(len(similar_products), 2)
+ first_review = self.page.find('div', class_='review')
- first_review = self.page.css('.review')[0]
similar_high_rated_reviews = [
review
for review in first_review.find_similar()
if int(review.attrib.get('data-rating', 0)) >= 4
]
self.assertEqual(len(similar_high_rated_reviews), 1)
|
tests.parser.test_general/TestParser.test_attribute_operations
|
Modified
|
D4Vinci~Scrapling
|
846a185b4969fedc4b4fc7220ac493a8440c0830
|
Updating parser tests to use `find` and `css_first` functions
|
<16>:<add> attr_json = self.page.css_first('#products').attrib['schema'].json()
<del> attr_json = self.page.css('#products')[0].attrib['schema'].json()
|
# module: tests.parser.test_general
class TestParser(unittest.TestCase):
def test_attribute_operations(self):
<0> """Test operations on elements attributes"""
<1> products = self.page.css('.product')
<2> product_ids = [product.attrib['data-id'] for product in products]
<3> self.assertEqual(product_ids, ['1', '2', '3'])
<4> self.assertTrue('data-id' in products[0].attrib)
<5>
<6> reviews = self.page.css('.review')
<7> review_ratings = [int(review.attrib['data-rating']) for review in reviews]
<8> self.assertEqual(sum(review_ratings) / len(review_ratings), 4.5)
<9>
<10> key_value = list(products[0].attrib.search_values('1', partial=False))
<11> self.assertEqual(list(key_value[0].keys()), ['data-id'])
<12>
<13> key_value = list(products[0].attrib.search_values('1', partial=True))
<14> self.assertEqual(list(key_value[0].keys()), ['data-id'])
<15>
<16> attr_json = self.page.css('#products')[0].attrib['schema'].json()
<17> self.assertEqual(attr_json, {'jsonable': 'data'})
<18> self.assertEqual(type(self.page.css('#products')[0].attrib.json_string), bytes)
<19>
|
===========changed ref 0===========
# module: tests.parser.test_general
class TestParser(unittest.TestCase):
def test_find_similar_elements(self):
"""Test Finding similar elements of an element"""
first_product = self.page.css_first('.product')
similar_products = first_product.find_similar()
self.assertEqual(len(similar_products), 2)
+ first_review = self.page.find('div', class_='review')
- first_review = self.page.css('.review')[0]
similar_high_rated_reviews = [
review
for review in first_review.find_similar()
if int(review.attrib.get('data-rating', 0)) >= 4
]
self.assertEqual(len(similar_high_rated_reviews), 1)
===========changed ref 1===========
# module: tests.parser.test_general
class TestParser(unittest.TestCase):
def test_element_navigation(self):
"""Test moving in the page from selected element"""
table = self.page.css('.product-list')[0]
self.assertIsNot(table.path, [])
self.assertNotEqual(table.html_content, '')
self.assertNotEqual(table.prettify(), '')
parent = table.parent
self.assertEqual(parent.attrib['id'], 'products')
children = table.children
self.assertEqual(len(children), 3)
parent_siblings = parent.siblings
self.assertEqual(len(parent_siblings), 1)
+ child = table.find({'data-id': "1"})
- child = table.css('[data-id="1"]')[0]
next_element = child.next
self.assertEqual(next_element.attrib['data-id'], '2')
prev_element = next_element.previous
self.assertEqual(prev_element.tag, child.tag)
all_prices = self.page.css('.price')
products_with_prices = [
price.find_ancestor(lambda p: p.has_class('product'))
for price in all_prices
]
self.assertEqual(len(products_with_prices), 3)
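A condensed sketch of the attribute helpers exercised by the test above (the markup is invented; `search_values` and the `.json()` call mirror the assertions in the test):

    from scrapling import Adaptor

    html = """<div id="products" schema='{"jsonable": "data"}' data-id="1"></div>"""
    page = Adaptor(html, auto_match=False)
    node = page.css_first('#products')
    data_id = node.attrib['data-id']                              # -> '1'
    hits = list(node.attrib.search_values('1', partial=True))    # attribute pairs whose value contains '1'
    schema = node.attrib['schema'].json()                         # -> {'jsonable': 'data'}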
|
scrapling.parser/Adaptor.find_all
|
Modified
|
D4Vinci~Scrapling
|
b9195365274549c1d70334ec3d35ccb8e515d181
|
Updating the keywords whitelisting logic in `find`/`find_all` functions
|
<8>:<add> # https://www.w3schools.com/python/python_ref_keywords.asp
<9>:<del> 'id_': 'id',
<11>:<add> 'for_': 'for',
|
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
def find_all(self, *args, **kwargs) -> Union['Adaptors[Adaptor]', List]:
<0> """Find elements by their tag name and filter them based on attributes for ease..
<1>
<2> :param args: Tag name(s), an iterable of tag names, or a dictionary of elements' attributes. Leave empty for selecting all.
<3> :param kwargs: The attributes you want to filter elements based on it.
<4> :return: The `Adaptors` object of the elements or empty list
<5> """
<6> # Attributes that are Python reserved words and can't be used directly
<7> # Ex: find_all('a', class="blah") -> find_all('a', class_="blah")
<8> whitelisted = {
<9> 'id_': 'id',
<10> 'class_': 'class',
<11> }
<12>
<13> if not args and not kwargs:
<14> raise TypeError('You have to pass something to search with, like tag name(s), tag attributes, or both.')
<15>
<16> tags = set()
<17> selectors = []
<18> attributes = dict()
<19> # Brace yourself for a wonderful journey!
<20> for arg in args:
<21> if type(arg) is str:
<22> tags.add(arg)
<23>
<24> elif type(arg) in [list, tuple, set]:
<25> if not all(map(lambda x: type(x) is str, arg)):
<26> raise TypeError('Nested Iterables are not accepted, only iterables of tag names are accepted')
<27> tags.update(set(arg))
<28>
<29> elif type(arg) is dict:
<30> if not all([(type(k) is str and type(v) is str) for k, v in arg.items()]):
<31> raise TypeError('Nested dictionaries are not accepted, only string keys and string values are accepted')
<32> attributes.update(arg)
<33>
<34> else:
<35> raise TypeError(f'Argument with type "{type(arg)}" is not accepted, please read the docs.')
<36>
<37> if not all([(type(</s>
|
===========below chunk 0===========
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
def find_all(self, *args, **kwargs) -> Union['Adaptors[Adaptor]', List]:
# offset: 1
raise TypeError('Only string values are accepted for arguments')
attributes.update(kwargs)
# It's easier and faster to build a selector than traversing the tree
tags = tags or ['']
for tag in tags:
selector = tag
for key, value in attributes.items():
key = whitelisted.get(key, key)
value = value.replace('"', r'\"') # Escape double quotes in user input
# Not escaping anything with the key so the user can pass patterns like {'href*': '/p/'} or get errors :)
selector += '[{}="{}"]'.format(key, value)
selectors.append(selector)
return self.css(', '.join(selectors))
===========unchanged ref 0===========
at: scrapling.parser.Adaptor
__slots__ = (
'url', 'encoding', '__auto_match_enabled', '_root', '_storage', '__debug',
'__keep_comments', '__huge_tree_enabled', '__attributes', '__text', '__tag',
)
body = html_content
at: typing
List = _alias(list, 1, inst=False, name='List')
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
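Sketch of the reserved-word aliases after this change (markup is invented): `class` and `for` are Python keywords, so the trailing-underscore forms are translated back before the combined CSS selector is built.

    from scrapling import Adaptor

    page = Adaptor('<label for="email" class="field">Email</label>', auto_match=False)
    # Internally this compiles to the selector: label[class="field"][for="email"]
    labels = page.find_all('label', class_='field', for_='email')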
|
scrapling.engines.camo/CamoufoxEngine.__init__
|
Modified
|
D4Vinci~Scrapling
|
fd34cd9afab90eb392f1c9c7481757576fe46fd3
|
Adding arguments `google_search` and `extra_headers` to StealthyFetcher
|
<5>:<add> :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<del> :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
|
<s>[Union[bool, float]] = True,
timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, addons: Optional[List[str]] = None,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, adaptor_arguments: Dict = None
- wait_selector_state: str = 'attached', adaptor_arguments: Dict = None
):
<0> """An engine that utilizes Camoufox library, check the `StealthyFetcher` class for more documentation.
<1>
<2> :param headless: Run the browser in headless/hidden (default), virtual screen mode, or headful/visible mode.
<3> :param block_images: Prevent the loading of images through Firefox preferences.
<4> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<5> :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<6> Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
<7> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<8> :param block_webrtc: Blocks WebRTC entirely.
<9> :param addons: List of Firefox addons to use. Must be paths to extracted addons.
<10> :param humanize: Humanize the cursor movement. Takes either True, or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
<11> :param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
<12> :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
<13> :param timeout: The timeout in milliseconds that's used in all operations and waits through the page. Default is 30000.
<14> :param page_action:</s>
|
===========below chunk 0===========
<s> float]] = True,
timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, addons: Optional[List[str]] = None,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, adaptor_arguments: Dict = None
- wait_selector_state: str = 'attached', adaptor_arguments: Dict = None
):
# offset: 1
:param wait_selector: Wait for a specific css selector to be in a specific state.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
:param adaptor_arguments: The arguments that will be passed in the end while creating the final Adaptor's class.
"""
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.addons = addons or []
self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
if callable(page_action):
self.page_action = page_action
else:
self.page_action = do_nothing
logging.error('[Ignored] Argument "page_action" must be callable')
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
===========unchanged ref 0===========
at: logging
error(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: scrapling.engines.toolbelt.custom
check_type_validity(variable: Any, valid_types: Union[List[Type], None], default_value: Any=None, critical: bool=False, param_name: Optional[str]=None) -> Any
do_nothing(page)
at: typing
_SpecialForm(*args, **kwds)
Callable = _CallableType(collections.abc.Callable, 2)
List = _alias(list, 1, inst=False, name='List')
Dict = _alias(dict, 2, inst=False, name='Dict')
|
scrapling.engines.camo/CamoufoxEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
fd34cd9afab90eb392f1c9c7481757576fe46fd3
|
Adding arguments `google_search` and `extra_headers` to StealthyFetcher
|
<21>:<add> if self.extra_headers:
<add> page.set_extra_http_headers(self.extra_headers)
<add>
<add> if self.google_search:
<add> res = page.goto(url, referer=generate_convincing_referer(url))
<del> res = page.goto(url, referer=generate_convincing_referer(url))
<22>:<add> else:
<add> res = page.goto(url)
<add>
|
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<4> """
<5> with Camoufox(
<6> headless=self.headless,
<7> block_images=self.block_images, # Careful! it makes some websites not finish loading at all, like StackOverflow, even in headful mode
<8> os=get_os_name(),
<9> block_webrtc=self.block_webrtc,
<10> allow_webgl=self.allow_webgl,
<11> addons=self.addons,
<12> humanize=self.humanize,
<13> i_know_what_im_doing=True, # To turn warnings off with user configurations
<14> ) as browser:
<15> page = browser.new_page()
<16> page.set_default_navigation_timeout(self.timeout)
<17> page.set_default_timeout(self.timeout)
<18> if self.disable_resources:
<19> page.route("**/*", intercept_route)
<20>
<21> res = page.goto(url, referer=generate_convincing_referer(url))
<22> page.wait_for_load_state(state="load")
<23> page.wait_for_load_state(state="domcontentloaded")
<24> if self.network_idle:
<25> page.wait_for_load_state('networkidle')
<26>
<27> page = self.page_action(page)
<28>
<29> if self.wait_selector and type(self.wait_selector) is str:
<30> waiter = page.locator(self.wait_selector)
<31> waiter.wait_for(state=self.wait_selector_state)
<32>
<33> content_type = res.</s>
|
===========below chunk 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=res.text(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.camo.CamoufoxEngine.__init__
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.addons = addons or []
self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
self.page_action = do_nothing
self.page_action = page_action
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
at: scrapling.engines.toolbelt.custom.Response
url: str
text: str
content: bytes
status: int
reason: str
encoding: str = 'utf-8' # default encoding
cookies: Dict = field(default_factory=dict)
headers: Dict = field(default_factory=dict)
request_headers: Dict = field(default_factory=dict)
adaptor_arguments: Dict = field(default_factory=dict)
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url: str) -> str
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
===========unchanged ref 1===========
get_os_name() -> Union[str, None]
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: scrapling.engines.toolbelt.navigation
intercept_route(route: Route) -> Union[Route, None]
===========changed ref 0===========
<s>[Union[bool, float]] = True,
timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, addons: Optional[List[str]] = None,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, adaptor_arguments: Dict = None
- wait_selector_state: str = 'attached', adaptor_arguments: Dict = None
):
"""An engine that utilizes Camoufox library, check the `StealthyFetcher` class for more documentation.
:param headless: Run the browser in headless/hidden (default), virtual screen mode, or headful/visible mode.
:param block_images: Prevent the loading of images through Firefox preferences.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
+ :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
- :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
:param block_webrtc: Blocks WebRTC entirely.
:param addons: List of Firefox addons to use. Must be paths to extracted addons.
:param humanize: Humanize the cursor movement. Takes either True, or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
:param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
:param network_idle: Wait for the page until there are no network connections for at least 500 ms.
:param timeout: The timeout in milliseconds that's</s>
===========changed ref 1===========
<s> float]] = True,
timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, addons: Optional[List[str]] = None,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, adaptor_arguments: Dict = None
- wait_selector_state: str = 'attached', adaptor_arguments: Dict = None
):
# offset: 1
<s>idle: Wait for the page until there are no network connections for at least 500 ms.
:param timeout: The timeout in milliseconds that's used in all operations and waits through the page. Default is 30000.
:param page_action: Added for automation. A function that takes the `page` object, do the automation you need, then return `page` again.
:param wait_selector: Wait for a specific css selector to be in a specific state.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
+ :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
+ :param extra_headers: A dictionary of extra headers to add to headers on the request. The referer set by the `google_search` argument takes priority over the referer set here if used together.
:param adaptor_arguments: The arguments that will be passed in the end while creating the final Adaptor's class.
"""
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
+ self.google_search = bool(google_search)
+ self.extra</s>
|
scrapling.fetchers/StealthyFetcher.fetch
|
Modified
|
D4Vinci~Scrapling
|
fd34cd9afab90eb392f1c9c7481757576fe46fd3
|
Adding arguments `google_search` and `extra_headers` to StealthyFetcher
|
<6>:<add> :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<del> :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
|
<s>_idle: Optional[bool] = False, addons: Optional[List[str]] = None,
timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, humanize: Optional[Union[bool, float]] = True,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None
- wait_selector_state: str = 'attached',
) -> Response:
<0> """
<1> Opens up a browser and do your request based on your chosen options below.
<2> :param url: Target url.
<3> :param headless: Run the browser in headless/hidden (default), 'virtual' screen mode, or headful/visible mode.
<4> :param block_images: Prevent the loading of images through Firefox preferences.
<5> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<6> :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<7> Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
<8> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<9> :param block_webrtc: Blocks WebRTC entirely.
<10> :param addons: List of Firefox addons to use. Must be paths to extracted addons.
<11> :param humanize: Humanize the cursor movement. Takes either True, or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
<12> :param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
<13> :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
<14> :param timeout: The timeout in milliseconds that's used in all operations and waits through the page. Default is 30000.
<15> :param</s>
|
===========below chunk 0===========
<s>bool] = False, addons: Optional[List[str]] = None,
timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, humanize: Optional[Union[bool, float]] = True,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None
- wait_selector_state: str = 'attached',
) -> Response:
# offset: 1
:param wait_selector: Wait for a specific css selector to be in a specific state.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
engine = CamoufoxEngine(
timeout=timeout,
headless=headless,
page_action=page_action,
block_images=block_images,
block_webrtc=block_webrtc,
addons=addons,
humanize=humanize,
allow_webgl=allow_webgl,
disable_resources=disable_resources,
network_idle=network_idle,
wait_selector=wait_selector,
wait_selector_state=wait_selector_state,
adaptor_arguments=self.adaptor_arguments,
)
return engine.fetch(url)
===========unchanged ref 0===========
at: scrapling.engines.camo
CamoufoxEngine(headless: Optional[Union[bool, Literal['virtual']]]=True, block_images: Optional[bool]=False, disable_resources: Optional[bool]=False, block_webrtc: Optional[bool]=False, allow_webgl: Optional[bool]=False, network_idle: Optional[bool]=False, humanize: Optional[Union[bool, float]]=True, timeout: Optional[float]=30000, page_action: Callable=do_nothing, wait_selector: Optional[str]=None, addons: Optional[List[str]]=None, wait_selector_state: str='attached', adaptor_arguments: Dict=None)
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
do_nothing(page)
at: typing
_SpecialForm(*args, **kwds)
Callable = _CallableType(collections.abc.Callable, 2)
List = _alias(list, 1, inst=False, name='List')
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
"""Opens up the browser and do your request based on your chosen options.
:param url: Target url.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
with Camoufox(
headless=self.headless,
block_images=self.block_images, # Careful! it makes some websites not finish loading at all, like StackOverflow, even in headful mode
os=get_os_name(),
block_webrtc=self.block_webrtc,
allow_webgl=self.allow_webgl,
addons=self.addons,
humanize=self.humanize,
i_know_what_im_doing=True, # To turn warnings off with user configurations
) as browser:
page = browser.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.disable_resources:
page.route("**/*", intercept_route)
+ if self.extra_headers:
+ page.set_extra_http_headers(self.extra_headers)
+
+ if self.google_search:
+ res = page.goto(url, referer=generate_convincing_referer(url))
- res = page.goto(url, referer=generate_convincing_referer(url))
+ else:
+ res = page.goto(url)
+
page.wait_for_load_state(state="load")
page.wait_for_load_state(state="domcontentloaded")
if self.network_idle:
page.wait_for_load_state('networkidle')
page = self.page_action(page)
if self.wait_selector and type</s>
===========changed ref 1===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
<s>_state('networkidle')
page = self.page_action(page)
if self.wait_selector and type(self.wait_selector) is str:
waiter = page.locator(self.wait_selector)
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=res.text(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
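
To make the `page_action` and `wait_selector` hooks documented above concrete, here is a minimal usage sketch. The `StealthyFetcher` import path and keyword names are assumptions inferred from the signatures quoted in this record, not verified against a released package.

from scrapling.fetchers import StealthyFetcher  # assumed import path, not quoted in this record

def scroll_to_bottom(page):
    # `page` is the Playwright Page object the engine hands over.
    page.mouse.wheel(0, 5000)      # scroll to trigger lazy-loaded content
    page.wait_for_timeout(1000)    # give the new content a moment to render
    return page                    # the hook must hand the page back

fetcher = StealthyFetcher(auto_match=False)
response = fetcher.fetch(
    'https://example.com/products',
    network_idle=True,             # wait until the network quiets down
    page_action=scroll_to_bottom,  # run the automation before scraping
    wait_selector='div.product',   # then wait for products to be attached
)
print(response.status, response.reason)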
|
scrapling.engines.camo/CamoufoxEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
ee8f78feb1ac1761584580e0f1bc078ef282d391
|
Update camo.py
|
<24>:<del> if self.google_search:
<25>:<add> res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
<del> res = page.goto(url, referer=generate_convincing_referer(url))
<26>:<del> else:
<27>:<del> res = page.goto(url)
<28>:<del>
|
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<4> """
<5> with Camoufox(
<6> headless=self.headless,
<7> block_images=self.block_images, # Careful! It makes some websites never finish loading, like StackOverflow, even in headful mode
<8> os=get_os_name(),
<9> block_webrtc=self.block_webrtc,
<10> allow_webgl=self.allow_webgl,
<11> addons=self.addons,
<12> humanize=self.humanize,
<13> i_know_what_im_doing=True, # To turn warnings off with user configurations
<14> ) as browser:
<15> page = browser.new_page()
<16> page.set_default_navigation_timeout(self.timeout)
<17> page.set_default_timeout(self.timeout)
<18> if self.disable_resources:
<19> page.route("**/*", intercept_route)
<20>
<21> if self.extra_headers:
<22> page.set_extra_http_headers(self.extra_headers)
<23>
<24> if self.google_search:
<25> res = page.goto(url, referer=generate_convincing_referer(url))
<26> else:
<27> res = page.goto(url)
<28>
<29> page.wait_for_load_state(state="load")
<30> page.wait_for_load_state(state="domcontentloaded")
<31> if self.network_idle:
<32> page.wait_for_load_state('networkidle')
<33>
<34> page = self.page_action(page)
<35>
<36> if self.</s>
|
===========below chunk 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
waiter = page.locator(self.wait_selector)
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=res.text(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.camo.CamoufoxEngine.__init__
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.google_search = bool(google_search)
self.extra_headers = extra_headers or {}
self.addons = addons or []
self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
self.page_action = do_nothing
self.page_action = page_action
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
at: scrapling.engines.toolbelt.custom.Response
url: str
text: str
content: bytes
status: int
reason: str
encoding: str = 'utf-8' # default encoding
cookies: Dict = field(default_factory=dict)
headers: Dict = field(default_factory=dict)
request_headers: Dict = field(default_factory=dict)
adaptor_arguments: Dict = field(default_factory=dict)
===========unchanged ref 1===========
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url: str) -> str
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
get_os_name() -> Union[str, None]
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: scrapling.engines.toolbelt.navigation
intercept_route(route: Route) -> Union[Route, None]
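
`generate_convincing_referer(url)` is only referenced above, not quoted. The snippet below is a rough, stdlib-only sketch of the idea (a fake Google-search referer for the target host), not the library's actual implementation.

from urllib.parse import urlparse

def fake_google_referer(url: str) -> str:
    # Pretend the visit came from a Google search for the target host.
    host = urlparse(url).netloc
    if host.startswith('www.'):
        host = host[4:]
    return f'https://www.google.com/search?q={host}'

print(fake_google_referer('https://www.example.com/page'))
# -> https://www.google.com/search?q=example.com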
|
scrapling.engines.pw/PlaywrightEngine.__init__
|
Modified
|
D4Vinci~Scrapling
|
fd7d787ab005c5a440f8186f49ac812180f52707
|
Add `extra_headers` argument to PW and isolate google referer feature from stealth mode as `google_search`
|
<3>:<add> :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<del> :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
|
<s>state: Optional[str] = 'attached',
stealth: bool = False,
hide_canvas: bool = True,
disable_webgl: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False,
nstbrowser_config: Optional[Dict] = None,
+ google_search: Optional[bool] = True,
+ extra_headers: Optional[Dict[str, str]] = None,
adaptor_arguments: Dict = None
):
<0> """An engine that utilizes PlayWright library, check the `PlayWrightFetcher` class for more documentation.
<1>
<2> :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
<3> :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<4> Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
<5> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<6> :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
<7> :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
<8> :param timeout: The timeout in milliseconds that's used in all operations and waits through the page. Default is 30000.
<9> :param page_action: Added for automation. A function that takes the `page` object, do the automation you need, then return `page` again.
<10> :param wait_selector: Wait for a specific css selector to be in a specific state.
<11> :param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
<12> :param stealth: Enables stealth mode, check the documentation to see what stealth mode does currently.
<13> :param hide_canvas: Add random noise to canvas operations to prevent fingerprinting.
<14> </s>
|
===========below chunk 0===========
<s>] = 'attached',
stealth: bool = False,
hide_canvas: bool = True,
disable_webgl: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False,
nstbrowser_config: Optional[Dict] = None,
+ google_search: Optional[bool] = True,
+ extra_headers: Optional[Dict[str, str]] = None,
adaptor_arguments: Dict = None
):
# offset: 1
:param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP.
:param nstbrowser_mode: Enables NSTBrowser mode, it has to be used with the `cdp_url` argument or it will get completely ignored.
:param nstbrowser_config: The config you want to send with requests to the NSTBrowser. If left empty, Scrapling defaults to an optimized NSTBrowser's docker browserless config.
:param adaptor_arguments: The arguments that will be passed in the end while creating the final Adaptor's class.
"""
self.headless = headless
self.disable_resources = disable_resources
self.network_idle = bool(network_idle)
self.stealth = bool(stealth)
self.hide_canvas = bool(hide_canvas)
self.disable_webgl = bool(disable_webgl)
self.cdp_url = cdp_url
self.useragent = useragent
self.timeout = check_type_validity(timeout, [int, float], 30000)
if callable(page_action):
self.page_action = page_action
else:
self.page_action = do_nothing
logging.error('[Ignored] Argument "page_action" must be callable')
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
self.nstbrowser_mode = bool(nstbrowser_mode)
self.nstbrowser_config = nstbrowser_config
self.adapt</s>
===========below chunk 1===========
<s>] = 'attached',
stealth: bool = False,
hide_canvas: bool = True,
disable_webgl: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False,
nstbrowser_config: Optional[Dict] = None,
+ google_search: Optional[bool] = True,
+ extra_headers: Optional[Dict[str, str]] = None,
adaptor_arguments: Dict = None
):
# offset: 2
<s>mode = bool(nstbrowser_mode)
self.nstbrowser_config = nstbrowser_config
self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
===========unchanged ref 0===========
at: logging
error(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: scrapling.engines.toolbelt.custom
check_type_validity(variable: Any, valid_types: Union[List[Type], None], default_value: Any=None, critical: bool=False, param_name: Optional[str]=None) -> Any
do_nothing(page)
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
Dict = _alias(dict, 2, inst=False, name='Dict')
|
scrapling.engines.pw/PlaywrightEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
fd7d787ab005c5a440f8186f49ac812180f52707
|
Add `extra_headers` argument to PW and isolate google referer feature from stealth mode as `google_search`
|
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<4> """
<5> if not self.stealth:
<6> from playwright.sync_api import sync_playwright
<7> else:
<8> from rebrowser_playwright.sync_api import sync_playwright
<9>
<10> with sync_playwright() as p:
<11> # Handle the UserAgent early
<12> if self.useragent:
<13> extra_headers = {}
<14> useragent = self.useragent
<15> else:
<16> extra_headers = generate_headers(browser_mode=True)
<17> useragent = extra_headers.get('User-Agent')
<18>
<19> # Prepare the flags before diving
<20> flags = DEFAULT_STEALTH_FLAGS
<21> if self.hide_canvas:
<22> flags += ['--fingerprinting-canvas-image-data-noise']
<23> if self.disable_webgl:
<24> flags += ['--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2']
<25>
<26> # Creating the browser
<27> if self.cdp_url:
<28> cdp_url = self._cdp_url_logic(flags if self.stealth else None)
<29> browser = p.chromium.connect_over_cdp(endpoint_url=cdp_url)
<30> else:
<31> if self.stealth:
<32> browser = p.chromium.launch(headless=self.headless, args=flags, ignore_default_args=['--enable-automation'], chromium_sandbox=True)
<33> else:
<34> browser = p.chromium.launch(headless=self.headless, ignore_</s>
|
===========below chunk 0===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 1
# Creating the context
if self.stealth:
context = browser.new_context(
locale='en-US',
is_mobile=False,
has_touch=False,
color_scheme='dark', # Bypasses the 'prefersLightColor' check in creepjs
user_agent=useragent,
device_scale_factor=2,
# I'm thinking about disabling it to rest from all Service Workers headache but let's keep it as it is for now
service_workers="allow",
ignore_https_errors=True,
extra_http_headers=extra_headers,
screen={"width": 1920, "height": 1080},
viewport={"width": 1920, "height": 1080},
permissions=["geolocation", 'notifications'],
)
else:
context = browser.new_context(
color_scheme='dark',
user_agent=useragent,
device_scale_factor=2,
extra_http_headers=extra_headers
)
# Finally we are in business
page = context.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.disable_resources:
page.route("**/*", intercept_route)
if self.stealth:
# Basic bypasses nothing fancy as I'm still working on it
# But with adding these bypasses to the above config, it bypasses many online tests like
# https://bot.sannysoft.com/
# https://kaliiiiiiiiii.github.io/brotector/
# https://pixelscan.net/
# https://iphey.com/
# https://www.browserscan.net/bot-detection <== this one also checks for the CDP runtime fingerprint
# https://arh.antoinevastel.com/</s>
===========below chunk 1===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 2
<s>detection <== this one also checks for the CDP runtime fingerprint
# https://arh.antoinevastel.com/bots/areyouheadless/
# https://prescience-data.github.io/execution-monitor.html
page.add_init_script(path=js_bypass_path('webdriver_fully.js'))
page.add_init_script(path=js_bypass_path('window_chrome.js'))
page.add_init_script(path=js_bypass_path('navigator_plugins.js'))
page.add_init_script(path=js_bypass_path('pdf_viewer.js'))
page.add_init_script(path=js_bypass_path('notification_permission.js'))
page.add_init_script(path=js_bypass_path('screen_props.js'))
page.add_init_script(path=js_bypass_path('playwright_fingerprint.js'))
res = page.goto(url, referer=generate_convincing_referer(url) if self.stealth else None)
page.wait_for_load_state(state="load")
page.wait_for_load_state(state="domcontentloaded")
if self.network_idle:
page.wait_for_load_state('networkidle')
page = self.page_action(page)
if self.wait_selector and type(self.wait_selector) is str:
waiter = page.locator(self.wait_selector)
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.</s>
===========below chunk 2===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 3
<s>
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=res.text(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.constants
DEFAULT_STEALTH_FLAGS = [
# Explanation: https://peter.sh/experiments/chromium-command-line-switches/
# Generally this will make the browser faster and less detectable
'--no-pings',
'--incognito',
'--test-type',
'--lang=en-US',
'--mute-audio',
'--no-first-run',
'--disable-sync',
'--hide-scrollbars',
'--disable-logging',
'--start-maximized', # For headless check bypass
'--enable-async-dns',
'--disable-breakpad',
'--disable-infobars',
'--accept-lang=en-US',
'--use-mock-keychain',
'--disable-translate',
'--disable-extensions',
'--disable-voice-input',
'--window-position=0,0',
'--disable-wake-on-wifi',
'--ignore-gpu-blocklist',
'--enable-tcp-fast-open',
'--enable-web-bluetooth',
'--disable-hang-monitor',
'--password-store=basic',
'--disable-cloud-import',
'--disable-default-apps',
'--disable-print-preview',
'--disable-dev-shm-usage',
'--disable-popup-blocking',
'--metrics-recording-only',
'--disable-crash-reporter',
'--disable-partial-raster',
'--disable-gesture-typing',
'--disable-checker-imaging',
'--disable-prompt-on-repost',
'--force-color-profile=srgb',
'--font-render-hinting=none',
'--no-default-browser-check',
'--aggressive-cache-discard',
'--disable-component-update',
'--disable-cookie-encryption',
'--disable-domain-reliability',
'--disable-threaded-animation',
'--disable-threaded-scrolling',
# '--disable-reading-from-canvas',</s>
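
The charset handling repeated by these engines (default to 'utf-8' unless the Content-Type header says otherwise) is easy to isolate. A small stdlib-only sketch of the same parsing logic used above:

def charset_from_content_type(content_type: str, default: str = 'utf-8') -> str:
    # Same parsing as the inline code above: take whatever follows
    # "charset=" up to the next ";", otherwise fall back to the default.
    content_type = (content_type or '').lower()
    if 'charset=' in content_type:
        return content_type.split('charset=')[-1].split(';')[0].strip()
    return default

assert charset_from_content_type('text/html; charset=ISO-8859-1') == 'iso-8859-1'
assert charset_from_content_type('application/json') == 'utf-8'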
|
|
scrapling.fetchers/PlayWrightFetcher.fetch
|
Modified
|
D4Vinci~Scrapling
|
fd7d787ab005c5a440f8186f49ac812180f52707
|
Add `extra_headers` argument to PW and isolate google referer feature from stealth mode as `google_search`
|
<s>str] = 'attached',
+ hide_canvas: bool = True, disable_webgl: bool = False, extra_headers: Optional[Dict[str, str]] = None, google_search: Optional[bool] = True,
- hide_canvas: bool = True, disable_webgl: bool = False,
stealth: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False, nstbrowser_config: Optional[Dict] = None,
) -> Response:
<0> """Opens up a browser and do your request based on your chosen options below.
<1> :param url: Target url.
<2> :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
<3> :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<4> Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
<5> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<6> :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
<7> :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
<8> :param timeout: The timeout in milliseconds that's used in all operations and waits through the page. Default is 30000.
<9> :param page_action: Added for automation. A function that takes the `page` object, do the automation you need, then return `page` again.
<10> :param wait_selector: Wait for a specific css selector to be in a specific state.
<11> :param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
<12> :param stealth: Enables stealth mode, check the documentation to see what stealth mode does currently.
<13> :param hide_canvas: Add random noise to canvas operations to prevent fingerprinting.
<14> </s>
|
===========below chunk 0===========
<s>',
+ hide_canvas: bool = True, disable_webgl: bool = False, extra_headers: Optional[Dict[str, str]] = None, google_search: Optional[bool] = True,
- hide_canvas: bool = True, disable_webgl: bool = False,
stealth: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False, nstbrowser_config: Optional[Dict] = None,
) -> Response:
# offset: 1
:param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP.
:param nstbrowser_mode: Enables NSTBrowser mode, it has to be used with the `cdp_url` argument or it will get completely ignored.
:param nstbrowser_config: The config you want to send with requests to the NSTBrowser. If left empty, Scrapling defaults to an optimized NSTBrowser's docker browserless config.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
engine = PlaywrightEngine(
timeout=timeout,
stealth=stealth,
cdp_url=cdp_url,
headless=headless,
useragent=useragent,
page_action=page_action,
hide_canvas=hide_canvas,
network_idle=network_idle,
wait_selector=wait_selector,
disable_webgl=disable_webgl,
nstbrowser_mode=nstbrowser_mode,
nstbrowser_config=nstbrowser_config,
disable_resources=disable_resources,
wait_selector_state=wait_selector_state,
adaptor_arguments=self.adaptor_arguments,
)
return engine.fetch(url)
===========unchanged ref 0===========
at: scrapling.engines.pw
PlaywrightEngine(headless: Union[bool, str]=True, disable_resources: bool=False, useragent: Optional[str]=None, network_idle: Optional[bool]=False, timeout: Optional[float]=30000, page_action: Callable=do_nothing, wait_selector: Optional[str]=None, wait_selector_state: Optional[str]='attached', stealth: bool=False, hide_canvas: bool=True, disable_webgl: bool=False, cdp_url: Optional[str]=None, nstbrowser_mode: bool=False, nstbrowser_config: Optional[Dict]=None, adaptor_arguments: Dict=None)
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
do_nothing(page)
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
<s>state: Optional[str] = 'attached',
stealth: bool = False,
hide_canvas: bool = True,
disable_webgl: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False,
nstbrowser_config: Optional[Dict] = None,
+ google_search: Optional[bool] = True,
+ extra_headers: Optional[Dict[str, str]] = None,
adaptor_arguments: Dict = None
):
"""An engine that utilizes PlayWright library, check the `PlayWrightFetcher` class for more documentation.
:param headless: Run the browser in headless/hidden (default), or headful/visible mode.
+ :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
- :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
:param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
:param network_idle: Wait for the page until there are no network connections for at least 500 ms.
:param timeout: The timeout in milliseconds that's used in all operations and waits through the page. Default is 30000.
:param page_action: Added for automation. A function that takes the `page` object, do the automation you need, then return `page` again.
:param wait_selector: Wait for a specific css selector to be in a specific state.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
:param stealth: Enables stealth mode, check the documentation to see what stealth mode does</s>
===========changed ref 1===========
<s>] = 'attached',
stealth: bool = False,
hide_canvas: bool = True,
disable_webgl: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False,
nstbrowser_config: Optional[Dict] = None,
+ google_search: Optional[bool] = True,
+ extra_headers: Optional[Dict[str, str]] = None,
adaptor_arguments: Dict = None
):
# offset: 1
<s>`. Default state is `attached`.
:param stealth: Enables stealth mode, check the documentation to see what stealth mode does currently.
:param hide_canvas: Add random noise to canvas operations to prevent fingerprinting.
:param disable_webgl: Disables WebGL and WebGL 2.0 support entirely.
:param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP.
:param nstbrowser_mode: Enables NSTBrowser mode, it has to be used with the `cdp_url` argument or it will get completely ignored.
+ :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
+ :param extra_headers: A dictionary of extra headers to add to headers on the request. The referer set by the `google_search` argument takes priority over the referer set here if used together.
:param nstbrowser_config: The config you want to send with requests to the NSTBrowser. If left empty, Scrapling defaults to an optimized NSTBrowser's docker browserless config.
:param adaptor_arguments: The arguments that will be passed in the end while creating the final Adaptor's class.
"""
self.headless = headless
self.disable_resources = disable_resources
self.network_idle = bool(network_idle)
self.stealth = bool(stealth)
</s>
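
A minimal, hypothetical call to `PlayWrightFetcher.fetch` with some of the options documented above. The argument values are illustrative and the import path is taken from this record's module name; the described behaviour is assumed, not re-tested here.

from scrapling.fetchers import PlayWrightFetcher  # module path taken from this record

fetcher = PlayWrightFetcher(auto_match=False)
page = fetcher.fetch(
    'https://example.com',
    stealth=True,                       # turn on the stealth bypasses
    disable_resources=True,             # drop fonts/images/media/stylesheets
    google_search=True,                 # fake a Google-search referer
    extra_headers={'X-Debug': 'demo'},  # merged into the outgoing headers
    network_idle=True,
)
print(page.status, page.reason)
if page.adaptor:
    print(page.adaptor.css('title'))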
|
|
scrapling.engines.toolbelt.custom/Response.adaptor
|
Modified
|
D4Vinci~Scrapling
|
b90c8adb0264db13e5d524ed9ae5830a79cc5ee9
|
Adding the argument `automatch_domain` to all fetchers
|
<1>:<add> automatch_domain = self.adaptor_arguments.pop('automatch_domain', None)
<2>:<add> return Adaptor(body=self.content, url=automatch_domain or self.url, encoding=self.encoding, **self.adaptor_arguments)
<del> return Adaptor(body=self.content, url=self.url, encoding=self.encoding, **self.adaptor_arguments)
<4>:<add> return Adaptor(text=self.text, url=automatch_domain or self.url, encoding=self.encoding, **self.adaptor_arguments)
<del> return Adaptor(text=self.text, url=self.url, encoding=self.encoding, **self.adaptor_arguments)
|
# module: scrapling.engines.toolbelt.custom
@dataclass(frozen=True)
class Response:
@property
def adaptor(self) -> Union[Adaptor, None]:
<0> """Generate Adaptor instance from this response if possible, otherwise return None"""
<1> if self.content:
<2> return Adaptor(body=self.content, url=self.url, encoding=self.encoding, **self.adaptor_arguments)
<3> elif self.text:
<4> return Adaptor(text=self.text, url=self.url, encoding=self.encoding, **self.adaptor_arguments)
<5> return None
<6>
|
===========unchanged ref 0===========
at: scrapling.engines.toolbelt.custom.Response
url: str
text: str
content: bytes
status: int
reason: str
encoding: str = 'utf-8' # default encoding
cookies: Dict = field(default_factory=dict)
headers: Dict = field(default_factory=dict)
request_headers: Dict = field(default_factory=dict)
adaptor_arguments: Dict = field(default_factory=dict)
at: scrapling.parser
Adaptor(text: Optional[str]=None, url: Optional[str]=None, body: bytes=b"", encoding: str="utf8", huge_tree: bool=True, root: Optional[html.HtmlElement]=None, keep_comments: Optional[bool]=False, auto_match: Optional[bool]=True, storage: Any=SQLiteStorageSystem, storage_args: Optional[Dict]=None, debug: Optional[bool]=True)
at: typing.MutableMapping
pop(key: _KT) -> _VT
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
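
The `automatch_domain` handling above reduces to popping an optional key out of `adaptor_arguments` and letting it override the response URL. A tiny stand-alone demo of that pattern, plain Python only:

def resolve_parser_url(response_url: str, adaptor_arguments: dict) -> str:
    # `pop` removes the key so it is never forwarded to the Adaptor
    # constructor; a missing or empty value falls back to the real URL.
    automatch_domain = adaptor_arguments.pop('automatch_domain', None)
    return automatch_domain or response_url

args = {'huge_tree': True, 'automatch_domain': 'books.example.com'}
print(resolve_parser_url('https://cdn.example.net/page/3', args))  # books.example.com
print(args)  # {'huge_tree': True} -- the key is gone before the parser sees it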
|
scrapling.engines.toolbelt.custom/BaseFetcher.__init__
|
Modified
|
D4Vinci~Scrapling
|
b90c8adb0264db13e5d524ed9ae5830a79cc5ee9
|
Adding the argument `automatch_domain` to all fetchers
|
<11>:<add> :param automatch_domain: For cases where you want to automatch selectors across different websites as if they were on the same website, use this argument to unify them.
<add> Otherwise, the domain of the request is used by default.
|
# module: scrapling.engines.toolbelt.custom
class BaseFetcher:
def __init__(
self, huge_tree: bool = True, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None, debug: Optional[bool] = True,
+ automatch_domain: Optional[str] = None,
):
<0> """Arguments below are the same from the Adaptor class so you can pass them directly, the rest of Adaptor's arguments
<1> are detected and passed automatically from the Fetcher based on the response for accessibility.
<2>
<3> :param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls
<4> libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion.
<5> :param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons
<6> :param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
<7> priority over all auto-match related arguments/functions in the class.
<8> :param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info.
<9> :param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class.
<10> If empty, default values will be used.
<11> :param debug: Enable debug mode
<12> """
<13> # Adaptor class parameters
<14> # I won't validate Adaptor's class parameters here again, I will leave it to be validated later
<15> self.adaptor_arguments = dict(
<16> huge_tree=huge_tree,
<17> keep_comments=keep_comments,
<18> auto_match=auto_match,
<19> storage=storage,
<20> storage_args=storage_args,
<21> debug=debug,
<22> )
<23> # If the user used fetchers first, then configure the logger from here instead of the `Adaptor` class
<24> setup_basic_logging(level='debug' if debug else '</s>
|
===========below chunk 0===========
# module: scrapling.engines.toolbelt.custom
class BaseFetcher:
def __init__(
self, huge_tree: bool = True, keep_comments: Optional[bool] = False, auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem, storage_args: Optional[Dict] = None, debug: Optional[bool] = True,
+ automatch_domain: Optional[str] = None,
):
# offset: 1
===========unchanged ref 0===========
at: scrapling.core.storage_adaptors
SQLiteStorageSystem(storage_file: str, url: Union[str, None]=None)
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
# module: scrapling.engines.toolbelt.custom
@dataclass(frozen=True)
class Response:
@property
def adaptor(self) -> Union[Adaptor, None]:
"""Generate Adaptor instance from this response if possible, otherwise return None"""
+ automatch_domain = self.adaptor_arguments.pop('automatch_domain', None)
if self.content:
+ return Adaptor(body=self.content, url=automatch_domain or self.url, encoding=self.encoding, **self.adaptor_arguments)
- return Adaptor(body=self.content, url=self.url, encoding=self.encoding, **self.adaptor_arguments)
elif self.text:
+ return Adaptor(text=self.text, url=automatch_domain or self.url, encoding=self.encoding, **self.adaptor_arguments)
- return Adaptor(text=self.text, url=self.url, encoding=self.encoding, **self.adaptor_arguments)
return None
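
`check_type_validity(variable, valid_types, default_value, ...)` is referenced throughout these engines but its body is not quoted here. The sketch below is a hedged re-implementation of a subset of that signature, illustrating only the behaviour implied by calls like `check_type_validity(timeout, [int, float], 30000)`:

import logging
from typing import Any, List, Optional, Type, Union

def check_type_validity_sketch(
    variable: Any,
    valid_types: Union[List[Type], None],
    default_value: Any = None,
    param_name: Optional[str] = None,
) -> Any:
    # Keep the value if its type is allowed, otherwise log and fall back.
    if valid_types is None or type(variable) in valid_types:
        return variable
    logging.error('[Ignored] Argument "%s" must be of type %s', param_name, valid_types)
    return default_value

print(check_type_validity_sketch('30s', [int, float], 30000, param_name='timeout'))  # 30000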
|
scrapling.parser/Adaptor.find_all
|
Modified
|
D4Vinci~Scrapling
|
6b62579a7e22e6826813f1d69fc1862587a03bc2
|
Making `find_all`/`find` methods on steroids
|
<0>:<add> """Find elements by filters of your creations for ease..
<del> """Find elements by their tag name and filter them based on attributes for ease..
<2>:<add> :param args: Tag name(s), an iterable of tag names, regex patterns, function, or a dictionary of elements' attributes. Leave empty for selecting all.
<del> :param args: Tag name(s), an iterable of tag names, or a dictionary of elements' attributes. Leave empty for selecting all.
<17>:<del> tags = set()
<18>:<del> selectors = []
<20>:<add> tags, patterns = set(), set()
<add> results, functions, selectors = [], [], []
<add>
<add> def _search_tree(element: Adaptor, filter_function: Callable) -> None:
<add> """Collect element if it fulfills passed function otherwise, traverse the children tree and iterate"""
<add> if filter_function(element):
<add> results.append(element)
<add>
<add> for branch in element.children:
<add> _search_tree(branch, filter_function)
<add>
|
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
+ def find_all(self, *args: Union[str, Iterable[str], Pattern, Callable, Dict[str, str]], **kwargs: str) -> Union['Adaptors[Adaptor]', List]:
- def find_all(self, *args, **kwargs) -> Union['Adaptors[Adaptor]', List]:
<0> """Find elements by their tag name and filter them based on attributes for ease..
<1>
<2> :param args: Tag name(s), an iterable of tag names, or a dictionary of elements' attributes. Leave empty for selecting all.
<3> :param kwargs: The attributes you want to filter elements based on it.
<4> :return: The `Adaptors` object of the elements or empty list
<5> """
<6> # Attributes that are Python reserved words and can't be used directly
<7> # Ex: find_all('a', class="blah") -> find_all('a', class_="blah")
<8> # https://www.w3schools.com/python/python_ref_keywords.asp
<9> whitelisted = {
<10> 'class_': 'class',
<11> 'for_': 'for',
<12> }
<13>
<14> if not args and not kwargs:
<15> raise TypeError('You have to pass something to search with, like tag name(s), tag attributes, or both.')
<16>
<17> tags = set()
<18> selectors = []
<19> attributes = dict()
<20> # Brace yourself for a wonderful journey!
<21> for arg in args:
<22> if type(arg) is str:
<23> tags.add(arg)
<24>
<25> elif type(arg) in [list, tuple, set]:
<26> if not all(map(lambda x: type(x) is str, arg)):
<27> raise TypeError('Nested Iterables are not accepted, only iterables of tag names are accepted')
<28> tags.update(set(arg))
<29>
<30> elif type(arg) is dict:
<31> if not all([(type(k) is str and type(v) is str) for k, v in arg.items()]):
<32> </s>
|
===========below chunk 0===========
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
+ def find_all(self, *args: Union[str, Iterable[str], Pattern, Callable, Dict[str, str]], **kwargs: str) -> Union['Adaptors[Adaptor]', List]:
- def find_all(self, *args, **kwargs) -> Union['Adaptors[Adaptor]', List]:
# offset: 1
attributes.update(arg)
else:
raise TypeError(f'Argument with type "{type(arg)}" is not accepted, please read the docs.')
if not all([(type(k) is str and type(v) is str) for k, v in kwargs.items()]):
raise TypeError('Only string values are accepted for arguments')
for attribute_name, value in kwargs.items():
# Only replace names for kwargs, replacing them in dictionaries doesn't make sense
attribute_name = whitelisted.get(attribute_name, attribute_name)
attributes[attribute_name] = value
# It's easier and faster to build a selector than traversing the tree
tags = tags or ['']
for tag in tags:
selector = tag
for key, value in attributes.items():
value = value.replace('"', r'\"') # Escape double quotes in user input
# Not escaping anything with the key so the user can pass patterns like {'href*': '/p/'} or get errors :)
selector += '[{}="{}"]'.format(key, value)
selectors.append(selector)
return self.css(', '.join(selectors))
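
A short usage example of the selector-building behaviour described above (tag names plus attribute filters, with `class_` mapped to the reserved word `class`). The HTML is made up and `auto_match=False` keeps the example free of any storage backend:

from scrapling.parser import Adaptor  # module path taken from this record

html = '''
<html><body>
  <a class="external" href="https://example.com/a">A</a>
  <a class="internal" href="/b">B</a>
  <span class="external">not a link</span>
</body></html>
'''
page = Adaptor(text=html, auto_match=False)

# Equivalent to the CSS selector a[class="external"] built internally above.
links = page.find_all('a', class_='external')
print(len(links))     # 1
print(links[0].text)  # A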
|
scrapling.parser/Adaptor.find_all
|
Modified
|
D4Vinci~Scrapling
|
4b2814404aec933772f57d38c2db5c71776ba573
|
Fixing the functions-based filtering logic
|
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
def find_all(self, *args: Union[str, Iterable[str], Pattern, Callable, Dict[str, str]], **kwargs: str) -> Union['Adaptors[Adaptor]', List]:
<0> """Find elements by filters of your creations for ease..
<1>
<2> :param args: Tag name(s), an iterable of tag names, regex patterns, function, or a dictionary of elements' attributes. Leave empty for selecting all.
<3> :param kwargs: The attributes you want to filter elements based on it.
<4> :return: The `Adaptors` object of the elements or empty list
<5> """
<6> # Attributes that are Python reserved words and can't be used directly
<7> # Ex: find_all('a', class="blah") -> find_all('a', class_="blah")
<8> # https://www.w3schools.com/python/python_ref_keywords.asp
<9> whitelisted = {
<10> 'class_': 'class',
<11> 'for_': 'for',
<12> }
<13>
<14> if not args and not kwargs:
<15> raise TypeError('You have to pass something to search with, like tag name(s), tag attributes, or both.')
<16>
<17> attributes = dict()
<18> tags, patterns = set(), set()
<19> results, functions, selectors = [], [], []
<20>
<21> def _search_tree(element: Adaptor, filter_function: Callable) -> None:
<22> """Collect element if it fulfills passed function otherwise, traverse the children tree and iterate"""
<23> if filter_function(element):
<24> results.append(element)
<25>
<26> for branch in element.children:
<27> _search_tree(branch, filter_function)
<28>
<29> # Brace yourself for a wonderful journey!
<30> for arg in args:
<31> if type(arg) is str:
<32> tags.add(arg)
<33>
<34> elif type(arg) in [list, tuple, set]:
<35> if not all(map(lambda x: type(x) is str, arg)):
</s>
|
===========below chunk 0===========
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
def find_all(self, *args: Union[str, Iterable[str], Pattern, Callable, Dict[str, str]], **kwargs: str) -> Union['Adaptors[Adaptor]', List]:
# offset: 1
tags.update(set(arg))
elif type(arg) is dict:
if not all([(type(k) is str and type(v) is str) for k, v in arg.items()]):
raise TypeError('Nested dictionaries are not accepted, only string keys and string values are accepted')
attributes.update(arg)
elif type(arg) is re.Pattern:
patterns.add(arg)
elif callable(arg):
if len(inspect.signature(arg).parameters) > 0:
functions.append(arg)
else:
raise TypeError("Callable filter function must have at least one argument to take `Adaptor` objects.")
else:
raise TypeError(f'Argument with type "{type(arg)}" is not accepted, please read the docs.')
if not all([(type(k) is str and type(v) is str) for k, v in kwargs.items()]):
raise TypeError('Only string values are accepted for arguments')
for attribute_name, value in kwargs.items():
# Only replace names for kwargs, replacing them in dictionaries doesn't make sense
attribute_name = whitelisted.get(attribute_name, attribute_name)
attributes[attribute_name] = value
# It's easier and faster to build a selector than traversing the tree
tags = tags or ['']
for tag in tags:
selector = tag
for key, value in attributes.items():
value = value.replace('"', r'\"') # Escape double quotes in user input
# Not escaping anything with the key so the user can pass patterns like {'href*': '/p/'} or get errors :)
selector += '[{}="{}"]'.format(key, value)
if selector:
selectors.append(selector)
if selectors:
results = self.css(', '.join(selectors</s>
===========below chunk 1===========
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
def find_all(self, *args: Union[str, Iterable[str], Pattern, Callable, Dict[str, str]], **kwargs: str) -> Union['Adaptors[Adaptor]', List]:
# offset: 2
<s> if selector:
selectors.append(selector)
if selectors:
results = self.css(', '.join(selectors))
if results:
# From the results, get the ones that fulfill passed regex patterns
for pattern in patterns:
results = results.filter(lambda e: e.text.re(pattern, check_match=True))
# From the results, get the ones that fulfill passed functions
for function in functions:
results = results.filter(function)
else:
for pattern in patterns:
results.extend(self.find_by_regex(pattern, first_match=False))
for function in functions:
_search_tree(self, function)
return self.__convert_results(results)
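
A companion example for the regex and callable filters added in this record; again the HTML is invented and the printed counts are not asserted, since the exact matching semantics of `find_by_regex` are not quoted here:

import re
from scrapling.parser import Adaptor

html = '<div><p class="price">19.99 USD</p><p class="title">A book</p></div>'
page = Adaptor(text=html, auto_match=False)

# Regex patterns are matched against each element's text.
priced = page.find_all(re.compile(r'\d+\.\d+ USD'))

# Callables receive an Adaptor element and keep it when they return a truthy value.
also_priced = page.find_all(lambda el: el.text and 'USD' in el.text)

print(len(priced), len(also_priced))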
|
|
scrapling.engines.pw/PlaywrightEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
43045e3eb2fdd01e4de7a731f1d367aae03fc5ab
|
Get page content after JS Execution
|
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<4> """
<5> if not self.stealth:
<6> from playwright.sync_api import sync_playwright
<7> else:
<8> from rebrowser_playwright.sync_api import sync_playwright
<9>
<10> with sync_playwright() as p:
<11> # Handle the UserAgent early
<12> if self.useragent:
<13> extra_headers = {}
<14> useragent = self.useragent
<15> else:
<16> extra_headers = generate_headers(browser_mode=True)
<17> useragent = extra_headers.get('User-Agent')
<18>
<19> # Prepare the flags before diving
<20> flags = DEFAULT_STEALTH_FLAGS
<21> if self.hide_canvas:
<22> flags += ['--fingerprinting-canvas-image-data-noise']
<23> if self.disable_webgl:
<24> flags += ['--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2']
<25>
<26> # Creating the browser
<27> if self.cdp_url:
<28> cdp_url = self._cdp_url_logic(flags if self.stealth else None)
<29> browser = p.chromium.connect_over_cdp(endpoint_url=cdp_url)
<30> else:
<31> if self.stealth:
<32> browser = p.chromium.launch(headless=self.headless, args=flags, ignore_default_args=['--enable-automation'], chromium_sandbox=True)
<33> else:
<34> browser = p.chromium.launch(headless=self.headless, ignore_</s>
|
===========below chunk 0===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 1
# Creating the context
if self.stealth:
context = browser.new_context(
locale='en-US',
is_mobile=False,
has_touch=False,
color_scheme='dark', # Bypasses the 'prefersLightColor' check in creepjs
user_agent=useragent,
device_scale_factor=2,
# I'm thinking about disabling it to rest from all Service Workers headache but let's keep it as it is for now
service_workers="allow",
ignore_https_errors=True,
extra_http_headers=extra_headers,
screen={"width": 1920, "height": 1080},
viewport={"width": 1920, "height": 1080},
permissions=["geolocation", 'notifications'],
)
else:
context = browser.new_context(
color_scheme='dark',
user_agent=useragent,
device_scale_factor=2,
extra_http_headers=extra_headers
)
# Finally we are in business
page = context.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.extra_headers:
page.set_extra_http_headers(self.extra_headers)
if self.disable_resources:
page.route("**/*", intercept_route)
if self.stealth:
# Basic bypasses nothing fancy as I'm still working on it
# But with adding these bypasses to the above config, it bypasses many online tests like
# https://bot.sannysoft.com/
# https://kaliiiiiiiiii.github.io/brotector/
# https://pixelscan.net/
# https://iphey.com/
# https://www.browserscan.net/bot-detection</s>
===========below chunk 1===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 2
<s>scan.net/
# https://iphey.com/
# https://www.browserscan.net/bot-detection <== this one also checks for the CDP runtime fingerprint
# https://arh.antoinevastel.com/bots/areyouheadless/
# https://prescience-data.github.io/execution-monitor.html
page.add_init_script(path=js_bypass_path('webdriver_fully.js'))
page.add_init_script(path=js_bypass_path('window_chrome.js'))
page.add_init_script(path=js_bypass_path('navigator_plugins.js'))
page.add_init_script(path=js_bypass_path('pdf_viewer.js'))
page.add_init_script(path=js_bypass_path('notification_permission.js'))
page.add_init_script(path=js_bypass_path('screen_props.js'))
page.add_init_script(path=js_bypass_path('playwright_fingerprint.js'))
res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
page.wait_for_load_state(state="load")
page.wait_for_load_state(state="domcontentloaded")
if self.network_idle:
page.wait_for_load_state('networkidle')
page = self.page_action(page)
if self.wait_selector and type(self.wait_selector) is str:
waiter = page.locator(self.wait_selector)
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse</s>
===========below chunk 2===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 3
<s> content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=res.text(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.constants
DEFAULT_STEALTH_FLAGS = [
# Explanation: https://peter.sh/experiments/chromium-command-line-switches/
# Generally this will make the browser faster and less detectable
'--no-pings',
'--incognito',
'--test-type',
'--lang=en-US',
'--mute-audio',
'--no-first-run',
'--disable-sync',
'--hide-scrollbars',
'--disable-logging',
'--start-maximized', # For headless check bypass
'--enable-async-dns',
'--disable-breakpad',
'--disable-infobars',
'--accept-lang=en-US',
'--use-mock-keychain',
'--disable-translate',
'--disable-extensions',
'--disable-voice-input',
'--window-position=0,0',
'--disable-wake-on-wifi',
'--ignore-gpu-blocklist',
'--enable-tcp-fast-open',
'--enable-web-bluetooth',
'--disable-hang-monitor',
'--password-store=basic',
'--disable-cloud-import',
'--disable-default-apps',
'--disable-print-preview',
'--disable-dev-shm-usage',
'--disable-popup-blocking',
'--metrics-recording-only',
'--disable-crash-reporter',
'--disable-partial-raster',
'--disable-gesture-typing',
'--disable-checker-imaging',
'--disable-prompt-on-repost',
'--force-color-profile=srgb',
'--font-render-hinting=none',
'--no-default-browser-check',
'--aggressive-cache-discard',
'--disable-component-update',
'--disable-cookie-encryption',
'--disable-domain-reliability',
'--disable-threaded-animation',
'--disable-threaded-scrolling',
# '--disable-reading-from-canvas',</s>
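
`intercept_route`, used above with `page.route("**/*", intercept_route)`, is only referenced, not quoted. Below is a plausible sketch of such a Playwright route handler that drops the resource types listed in the docstrings; it is not the library's actual code.

from playwright.sync_api import Route

BLOCKED_RESOURCE_TYPES = {
    'font', 'image', 'media', 'beacon', 'object', 'imageset',
    'texttrack', 'websocket', 'csp_report', 'stylesheet',
}

def intercept_route_sketch(route: Route) -> None:
    # Abort requests for heavy or unneeded resources, let everything else pass.
    if route.request.resource_type in BLOCKED_RESOURCE_TYPES:
        route.abort()
    else:
        route.continue_()

# Usage inside an engine: page.route("**/*", intercept_route_sketch)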
|
|
scrapling.engines.camo/CamoufoxEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
43045e3eb2fdd01e4de7a731f1d367aae03fc5ab
|
Get page content after JS Execution
|
<24>:<add> res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None, wait_until="domcontentloaded")
<del> res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
<25>:<del> page.wait_for_load_state(state="load")
<26>:<del> page.wait_for_load_state(state="domcontentloaded")
|
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<4> """
<5> with Camoufox(
<6> headless=self.headless,
<7> block_images=self.block_images, # Careful! It makes some websites never finish loading, like StackOverflow, even in headful mode
<8> os=get_os_name(),
<9> block_webrtc=self.block_webrtc,
<10> allow_webgl=self.allow_webgl,
<11> addons=self.addons,
<12> humanize=self.humanize,
<13> i_know_what_im_doing=True, # To turn warnings off with user configurations
<14> ) as browser:
<15> page = browser.new_page()
<16> page.set_default_navigation_timeout(self.timeout)
<17> page.set_default_timeout(self.timeout)
<18> if self.disable_resources:
<19> page.route("**/*", intercept_route)
<20>
<21> if self.extra_headers:
<22> page.set_extra_http_headers(self.extra_headers)
<23>
<24> res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
<25> page.wait_for_load_state(state="load")
<26> page.wait_for_load_state(state="domcontentloaded")
<27> if self.network_idle:
<28> page.wait_for_load_state('networkidle')
<29>
<30> page = self.page_action(page)
<31>
<32> if self.wait_selector and type(self.wait_selector) is str:
<33> waiter =</s>
|
===========below chunk 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=res.text(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.camo.CamoufoxEngine.__init__
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.google_search = bool(google_search)
self.extra_headers = extra_headers or {}
self.addons = addons or []
self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
self.page_action = do_nothing
self.page_action = page_action
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
at: scrapling.engines.toolbelt.custom.Response
url: str
text: str
content: bytes
status: int
reason: str
encoding: str = 'utf-8' # default encoding
cookies: Dict = field(default_factory=dict)
headers: Dict = field(default_factory=dict)
request_headers: Dict = field(default_factory=dict)
adaptor_arguments: Dict = field(default_factory=dict)
===========unchanged ref 1===========
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url: str) -> str
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
get_os_name() -> Union[str, None]
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: scrapling.engines.toolbelt.navigation
intercept_route(route: Route) -> Union[Route, None]
===========changed ref 0===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
"""Opens up the browser and do your request based on your chosen options.
:param url: Target url.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
if not self.stealth:
from playwright.sync_api import sync_playwright
else:
from rebrowser_playwright.sync_api import sync_playwright
with sync_playwright() as p:
# Handle the UserAgent early
if self.useragent:
extra_headers = {}
useragent = self.useragent
else:
extra_headers = generate_headers(browser_mode=True)
useragent = extra_headers.get('User-Agent')
# Prepare the flags before diving
flags = DEFAULT_STEALTH_FLAGS
if self.hide_canvas:
flags += ['--fingerprinting-canvas-image-data-noise']
if self.disable_webgl:
flags += ['--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2']
# Creating the browser
if self.cdp_url:
cdp_url = self._cdp_url_logic(flags if self.stealth else None)
browser = p.chromium.connect_over_cdp(endpoint_url=cdp_url)
else:
if self.stealth:
browser = p.chromium.launch(headless=self.headless, args=flags, ignore_default_args=['--enable-automation'], chromium_sandbox=True)
else:
browser = p.chromium.launch(headless=self.headless, ignore_default_args=['--enable-automation'])
# Creating the context
if self.stealth:
context = browser.new_</s>
===========changed ref 1===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 1
<s>--enable-automation'])
# Creating the context
if self.stealth:
context = browser.new_context(
locale='en-US',
is_mobile=False,
has_touch=False,
color_scheme='dark', # Bypasses the 'prefersLightColor' check in creepjs
user_agent=useragent,
device_scale_factor=2,
# I'm thinking about disabling it to rest from all Service Workers headache but let's keep it as it is for now
service_workers="allow",
ignore_https_errors=True,
extra_http_headers=extra_headers,
screen={"width": 1920, "height": 1080},
viewport={"width": 1920, "height": 1080},
permissions=["geolocation", 'notifications'],
)
else:
context = browser.new_context(
color_scheme='dark',
user_agent=useragent,
device_scale_factor=2,
extra_http_headers=extra_headers
)
# Finally we are in business
page = context.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.extra_headers:
page.set_extra_http_headers(self.extra_headers)
if self.disable_resources:
page.route("**/*", intercept_route)
if self.stealth:
# Basic bypasses nothing fancy as I'm still working on it
# But with adding these bypasses to the above config, it bypasses many online tests like
# https://bot.sannysoft.com/
# https://kaliiiiiiiiii.github.io/</s>
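
The navigation change in this record (goto with `wait_until="domcontentloaded"`, then explicit load-state waits) can be tried in plain Playwright. A generic illustration with Chromium rather than Camoufox, purely to show the waiting strategy:

from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch(headless=True)
    page = browser.new_page()
    # Return as soon as the DOM is ready instead of waiting for the full load event...
    page.goto('https://example.com', wait_until='domcontentloaded')
    # ...then opt in to stricter waits only where they are actually wanted.
    page.wait_for_load_state('networkidle')
    page.locator('h1').wait_for(state='attached')
    print(page.title())
    browser.close()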
|
scrapling.engines.toolbelt.custom/Response.adaptor
|
Modified
|
D4Vinci~Scrapling
|
239e2c73bfe458f42b11f35cfb9ed9fe1d3a4479
|
Correctly get page content after JS Execution
|
<2>:<add> if self.text:
<add> # For playwright that will be the response after all JS executed
<add> return Adaptor(text=self.text, url=automatch_domain or self.url, encoding=self.encoding, **self.adaptor_arguments)
<add> elif self.content:
<del> if self.content:
<3>:<add> # For playwright, that's after all JS is loaded but not all of them executed, because playwright doesn't offer something like page.content()
<add> # To get response Bytes after the load states
<add> # Reference: https://playwright.dev/python/docs/api/class-page
<4>:<del> elif self.text:
<5>:<del> return Adaptor(text=self.text, url=automatch_domain or self.url, encoding=self.encoding, **self.adaptor_arguments)
|
# module: scrapling.engines.toolbelt.custom
@dataclass(frozen=True)
class Response:
@property
def adaptor(self) -> Union[Adaptor, None]:
<0> """Generate Adaptor instance from this response if possible, otherwise return None"""
<1> automatch_domain = self.adaptor_arguments.pop('automatch_domain', None)
<2> if self.content:
<3> return Adaptor(body=self.content, url=automatch_domain or self.url, encoding=self.encoding, **self.adaptor_arguments)
<4> elif self.text:
<5> return Adaptor(text=self.text, url=automatch_domain or self.url, encoding=self.encoding, **self.adaptor_arguments)
<6> return None
<7>
|
===========unchanged ref 0===========
at: scrapling.engines.toolbelt.custom.Response
url: str
text: str
content: bytes
status: int
reason: str
encoding: str = 'utf-8' # default encoding
cookies: Dict = field(default_factory=dict)
headers: Dict = field(default_factory=dict)
request_headers: Dict = field(default_factory=dict)
adaptor_arguments: Dict = field(default_factory=dict)
at: scrapling.parser
Adaptor(text: Optional[str]=None, url: Optional[str]=None, body: bytes=b"", encoding: str="utf8", huge_tree: bool=True, root: Optional[html.HtmlElement]=None, keep_comments: Optional[bool]=False, auto_match: Optional[bool]=True, storage: Any=SQLiteStorageSystem, storage_args: Optional[Dict]=None, debug: Optional[bool]=True)
at: typing.MutableMapping
pop(key: _KT) -> _VT
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
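
The change above flips the priority so that `text` (the page content after JavaScript ran) wins over the raw response bytes. A tiny plain-Python mimic of that property for clarity, not the real Response class:

from dataclasses import dataclass

@dataclass(frozen=True)
class ResponseLike:
    url: str
    text: str = ''
    content: bytes = b''

    @property
    def parser_input(self):
        # Prefer the post-JS text when available, fall back to the raw bytes.
        if self.text:
            return 'text', self.text
        elif self.content:
            return 'bytes', self.content
        return None

print(ResponseLike('https://x.test', text='<html>after JS</html>', content=b'<html>raw</html>').parser_input)
print(ResponseLike('https://x.test', content=b'<html>raw</html>').parser_input)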
|
scrapling.engines.pw/PlaywrightEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
239e2c73bfe458f42b11f35cfb9ed9fe1d3a4479
|
Correctly get page content after JS Execution
|
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<4> """
<5> if not self.stealth:
<6> from playwright.sync_api import sync_playwright
<7> else:
<8> from rebrowser_playwright.sync_api import sync_playwright
<9>
<10> with sync_playwright() as p:
<11> # Handle the UserAgent early
<12> if self.useragent:
<13> extra_headers = {}
<14> useragent = self.useragent
<15> else:
<16> extra_headers = generate_headers(browser_mode=True)
<17> useragent = extra_headers.get('User-Agent')
<18>
<19> # Prepare the flags before diving
<20> flags = DEFAULT_STEALTH_FLAGS
<21> if self.hide_canvas:
<22> flags += ['--fingerprinting-canvas-image-data-noise']
<23> if self.disable_webgl:
<24> flags += ['--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2']
<25>
<26> # Creating the browser
<27> if self.cdp_url:
<28> cdp_url = self._cdp_url_logic(flags if self.stealth else None)
<29> browser = p.chromium.connect_over_cdp(endpoint_url=cdp_url)
<30> else:
<31> if self.stealth:
<32> browser = p.chromium.launch(headless=self.headless, args=flags, ignore_default_args=['--enable-automation'], chromium_sandbox=True)
<33> else:
<34> browser = p.chromium.launch(headless=self.headless, ignore_</s>
|
===========below chunk 0===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 1
# Creating the context
if self.stealth:
context = browser.new_context(
locale='en-US',
is_mobile=False,
has_touch=False,
color_scheme='dark', # Bypasses the 'prefersLightColor' check in creepjs
user_agent=useragent,
device_scale_factor=2,
# I'm thinking about disabling it to rest from all Service Workers headache but let's keep it as it is for now
service_workers="allow",
ignore_https_errors=True,
extra_http_headers=extra_headers,
screen={"width": 1920, "height": 1080},
viewport={"width": 1920, "height": 1080},
permissions=["geolocation", 'notifications'],
)
else:
context = browser.new_context(
color_scheme='dark',
user_agent=useragent,
device_scale_factor=2,
extra_http_headers=extra_headers
)
# Finally we are in business
page = context.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.extra_headers:
page.set_extra_http_headers(self.extra_headers)
if self.disable_resources:
page.route("**/*", intercept_route)
if self.stealth:
# Basic bypasses nothing fancy as I'm still working on it
# But with adding these bypasses to the above config, it bypasses many online tests like
# https://bot.sannysoft.com/
# https://kaliiiiiiiiii.github.io/brotector/
# https://pixelscan.net/
# https://iphey.com/
# https://www.browserscan.net/bot-detection</s>
===========below chunk 1===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 2
<s>scan.net/
# https://iphey.com/
# https://www.browserscan.net/bot-detection <== this one also checks for the CDP runtime fingerprint
# https://arh.antoinevastel.com/bots/areyouheadless/
# https://prescience-data.github.io/execution-monitor.html
page.add_init_script(path=js_bypass_path('webdriver_fully.js'))
page.add_init_script(path=js_bypass_path('window_chrome.js'))
page.add_init_script(path=js_bypass_path('navigator_plugins.js'))
page.add_init_script(path=js_bypass_path('pdf_viewer.js'))
page.add_init_script(path=js_bypass_path('notification_permission.js'))
page.add_init_script(path=js_bypass_path('screen_props.js'))
page.add_init_script(path=js_bypass_path('playwright_fingerprint.js'))
res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None, wait_until="domcontentloaded")
if self.network_idle:
page.wait_for_load_state('networkidle')
page = self.page_action(page)
if self.wait_selector and type(self.wait_selector) is str:
waiter = page.locator(self.wait_selector)
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content</s>
===========below chunk 2===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 3
<s>.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=res.text(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.constants
DEFAULT_STEALTH_FLAGS = [
# Explanation: https://peter.sh/experiments/chromium-command-line-switches/
# Generally this will make the browser faster and less detectable
'--no-pings',
'--incognito',
'--test-type',
'--lang=en-US',
'--mute-audio',
'--no-first-run',
'--disable-sync',
'--hide-scrollbars',
'--disable-logging',
'--start-maximized', # For headless check bypass
'--enable-async-dns',
'--disable-breakpad',
'--disable-infobars',
'--accept-lang=en-US',
'--use-mock-keychain',
'--disable-translate',
'--disable-extensions',
'--disable-voice-input',
'--window-position=0,0',
'--disable-wake-on-wifi',
'--ignore-gpu-blocklist',
'--enable-tcp-fast-open',
'--enable-web-bluetooth',
'--disable-hang-monitor',
'--password-store=basic',
'--disable-cloud-import',
'--disable-default-apps',
'--disable-print-preview',
'--disable-dev-shm-usage',
'--disable-popup-blocking',
'--metrics-recording-only',
'--disable-crash-reporter',
'--disable-partial-raster',
'--disable-gesture-typing',
'--disable-checker-imaging',
'--disable-prompt-on-repost',
'--force-color-profile=srgb',
'--font-render-hinting=none',
'--no-default-browser-check',
'--aggressive-cache-discard',
'--disable-component-update',
'--disable-cookie-encryption',
'--disable-domain-reliability',
'--disable-threaded-animation',
'--disable-threaded-scrolling',
# '--disable-reading-from-canvas',</s>
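The charset sniffing used while building the Response in this engine is plain string handling and can be exercised without a browser; a small standalone sketch of the same logic:

def parse_encoding(content_type: str, default: str = 'utf-8') -> str:
    # 'text/html; charset=ISO-8859-1' -> 'iso-8859-1'; anything without a charset falls back to the default
    if 'charset=' in content_type.lower():
        return content_type.lower().split('charset=')[-1].split(';')[0].strip()
    return default

assert parse_encoding('text/html; charset=UTF-8') == 'utf-8'
assert parse_encoding('application/json') == 'utf-8'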
|
|
scrapling.engines.camo/CamoufoxEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
239e2c73bfe458f42b11f35cfb9ed9fe1d3a4479
|
Correctly get page content after JS Execution
|
<24>:<add> res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
<del> res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None, wait_until="domcontentloaded")
<25>:<add> page.wait_for_load_state(state="domcontentloaded")
|
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<4> """
<5> with Camoufox(
<6> headless=self.headless,
<7> block_images=self.block_images, # Careful! it makes some websites doesn't finish loading at all like stackoverflow even in headful
<8> os=get_os_name(),
<9> block_webrtc=self.block_webrtc,
<10> allow_webgl=self.allow_webgl,
<11> addons=self.addons,
<12> humanize=self.humanize,
<13> i_know_what_im_doing=True, # To turn warnings off with user configurations
<14> ) as browser:
<15> page = browser.new_page()
<16> page.set_default_navigation_timeout(self.timeout)
<17> page.set_default_timeout(self.timeout)
<18> if self.disable_resources:
<19> page.route("**/*", intercept_route)
<20>
<21> if self.extra_headers:
<22> page.set_extra_http_headers(self.extra_headers)
<23>
<24> res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None, wait_until="domcontentloaded")
<25> if self.network_idle:
<26> page.wait_for_load_state('networkidle')
<27>
<28> page = self.page_action(page)
<29>
<30> if self.wait_selector and type(self.wait_selector) is str:
<31> waiter = page.locator(self.wait_selector)
<32> waiter.wait_for(state=self.wait_selector_state</s>
|
===========below chunk 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=res.text(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.camo.CamoufoxEngine.__init__
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.google_search = bool(google_search)
self.extra_headers = extra_headers or {}
self.addons = addons or []
self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
self.page_action = do_nothing
self.page_action = page_action
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, encoding: str='utf-8', cookies: Dict=field(default_factory=dict), headers: Dict=field(default_factory=dict), request_headers: Dict=field(default_factory=dict), adaptor_arguments: Dict=field(default_factory=dict))
at: scrapling.engines.toolbelt.custom.Response
url: str
text: str
content: bytes
status: int
reason: str
encoding: str = 'utf-8' # default encoding
cookies: Dict = field(default_factory=dict)
headers: Dict = field(default_factory=dict)
request_headers: Dict = field(default_factory=dict)
adaptor_arguments: Dict = field(default_factory=dict)
===========unchanged ref 1===========
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url: str) -> str
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
get_os_name() -> Union[str, None]
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: scrapling.engines.toolbelt.navigation
intercept_route(route: Route) -> Union[Route, None]
===========changed ref 0===========
# module: scrapling.engines.toolbelt.custom
@dataclass(frozen=True)
class Response:
@property
def adaptor(self) -> Union[Adaptor, None]:
"""Generate Adaptor instance from this response if possible, otherwise return None"""
automatch_domain = self.adaptor_arguments.pop('automatch_domain', None)
+ if self.text:
+ # For playwright that will be the response after all JS executed
+ return Adaptor(text=self.text, url=automatch_domain or self.url, encoding=self.encoding, **self.adaptor_arguments)
+ elif self.content:
- if self.content:
+ # For playwright, that's after all JS is loaded but not all of them executed, because playwright doesn't offer something like page.content()
+ # To get response Bytes after the load states
+ # Reference: https://playwright.dev/python/docs/api/class-page
return Adaptor(body=self.content, url=automatch_domain or self.url, encoding=self.encoding, **self.adaptor_arguments)
- elif self.text:
- return Adaptor(text=self.text, url=automatch_domain or self.url, encoding=self.encoding, **self.adaptor_arguments)
return None
===========changed ref 1===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
"""Opens up the browser and do your request based on your chosen options.
:param url: Target url.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
if not self.stealth:
from playwright.sync_api import sync_playwright
else:
from rebrowser_playwright.sync_api import sync_playwright
with sync_playwright() as p:
# Handle the UserAgent early
if self.useragent:
extra_headers = {}
useragent = self.useragent
else:
extra_headers = generate_headers(browser_mode=True)
useragent = extra_headers.get('User-Agent')
# Prepare the flags before diving
flags = DEFAULT_STEALTH_FLAGS
if self.hide_canvas:
flags += ['--fingerprinting-canvas-image-data-noise']
if self.disable_webgl:
flags += ['--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2']
# Creating the browser
if self.cdp_url:
cdp_url = self._cdp_url_logic(flags if self.stealth else None)
browser = p.chromium.connect_over_cdp(endpoint_url=cdp_url)
else:
if self.stealth:
browser = p.chromium.launch(headless=self.headless, args=flags, ignore_default_args=['--enable-automation'], chromium_sandbox=True)
else:
browser = p.chromium.launch(headless=self.headless, ignore_default_args=['--enable-automation'])
# Creating the context
if self.stealth:
context = browser.new_</s>
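The fetch flow above (goto, wait for the load state, run page_action, then wait for a selector) is what the user-facing fetcher builds on. A hedged sketch of how those hooks are typically supplied; whether StealthyFetcher.fetch forwards these exact keyword names is an assumption based on the engine arguments shown here:

from scrapling import StealthyFetcher  # assumed public import

def accept_cookies(page):
    # page_action receives the live browser page and must return it
    page.click('#accept-cookies')    # made-up selector, purely illustrative
    return page

response = StealthyFetcher(auto_match=False).fetch(
    'https://example.com',
    network_idle=True,           # also wait for the 'networkidle' load state
    page_action=accept_cookies,
    wait_selector='h1',          # block until this locator reaches the configured state
)
print(response.status, response.cookies)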
|
tests.fetchers.test_camoufox/TestStealthyFetcher.setUp
|
Modified
|
D4Vinci~Scrapling
|
de2d65856c2054cb6e0eab608a8d13bee2771b78
|
Adding playwright/camoufox tests for cookies and improve the timeout test
|
<7>:<add> self.delayed_url = f'{url}/delay/10' # 10 Seconds delay response
<add> self.cookies_url = f"{url}/cookies/set/test/value"
|
# module: tests.fetchers.test_camoufox
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestStealthyFetcher(unittest.TestCase):
def setUp(self):
<0> self.fetcher = StealthyFetcher(auto_match=False)
<1> url = self.httpbin.url
<2> self.status_200 = f'{url}/status/200'
<3> self.status_404 = f'{url}/status/404'
<4> self.status_501 = f'{url}/status/501'
<5> self.basic_url = f'{url}/get'
<6> self.html_url = f'{url}/html'
<7>
| |
tests.fetchers.test_camoufox/TestStealthyFetcher.test_infinite_timeout
|
Modified
|
D4Vinci~Scrapling
|
de2d65856c2054cb6e0eab608a8d13bee2771b78
|
Adding playwright/camoufox tests for cookies and improve the timeout test
|
<1>:<add> self.assertEqual(self.fetcher.fetch(self.delayed_url, timeout=None).status, 200)
<del> self.assertEqual(self.fetcher.fetch(self.html_url, timeout=None).status, 200)
|
# module: tests.fetchers.test_camoufox
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestStealthyFetcher(unittest.TestCase):
def test_infinite_timeout(self):
<0> """Test if infinite timeout breaks the code or not"""
<1> self.assertEqual(self.fetcher.fetch(self.html_url, timeout=None).status, 200)
<2>
|
===========changed ref 0===========
# module: tests.fetchers.test_camoufox
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestStealthyFetcher(unittest.TestCase):
+ def test_cookies_loading(self):
+ """Test if cookies are set after the request"""
+ self.assertEqual(self.fetcher.fetch(self.cookies_url).cookies, {'test': 'value'})
+
===========changed ref 1===========
# module: tests.fetchers.test_camoufox
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestStealthyFetcher(unittest.TestCase):
def setUp(self):
self.fetcher = StealthyFetcher(auto_match=False)
url = self.httpbin.url
self.status_200 = f'{url}/status/200'
self.status_404 = f'{url}/status/404'
self.status_501 = f'{url}/status/501'
self.basic_url = f'{url}/get'
self.html_url = f'{url}/html'
+ self.delayed_url = f'{url}/delay/10' # 10 Seconds delay response
+ self.cookies_url = f"{url}/cookies/set/test/value"
|
tests.fetchers.test_playwright/TestPlayWrightFetcher.setUp
|
Modified
|
D4Vinci~Scrapling
|
de2d65856c2054cb6e0eab608a8d13bee2771b78
|
Adding playwright/camoufox tests for cookies and improve the timeout test
|
<7>:<add> self.delayed_url = f'{url}/delay/10' # 10 Seconds delay response
<add> self.cookies_url = f"{url}/cookies/set/test/value"
|
# module: tests.fetchers.test_playwright
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestPlayWrightFetcher(unittest.TestCase):
def setUp(self):
<0> self.fetcher = PlayWrightFetcher(auto_match=False)
<1> url = self.httpbin.url
<2> self.status_200 = f'{url}/status/200'
<3> self.status_404 = f'{url}/status/404'
<4> self.status_501 = f'{url}/status/501'
<5> self.basic_url = f'{url}/get'
<6> self.html_url = f'{url}/html'
<7>
|
===========changed ref 0===========
# module: tests.fetchers.test_camoufox
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestStealthyFetcher(unittest.TestCase):
+ def test_cookies_loading(self):
+ """Test if cookies are set after the request"""
+ self.assertEqual(self.fetcher.fetch(self.cookies_url).cookies, {'test': 'value'})
+
===========changed ref 1===========
# module: tests.fetchers.test_camoufox
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestStealthyFetcher(unittest.TestCase):
def test_infinite_timeout(self):
"""Test if infinite timeout breaks the code or not"""
+ self.assertEqual(self.fetcher.fetch(self.delayed_url, timeout=None).status, 200)
- self.assertEqual(self.fetcher.fetch(self.html_url, timeout=None).status, 200)
===========changed ref 2===========
# module: tests.fetchers.test_camoufox
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestStealthyFetcher(unittest.TestCase):
def setUp(self):
self.fetcher = StealthyFetcher(auto_match=False)
url = self.httpbin.url
self.status_200 = f'{url}/status/200'
self.status_404 = f'{url}/status/404'
self.status_501 = f'{url}/status/501'
self.basic_url = f'{url}/get'
self.html_url = f'{url}/html'
+ self.delayed_url = f'{url}/delay/10' # 10 Seconds delay response
+ self.cookies_url = f"{url}/cookies/set/test/value"
|
tests.fetchers.test_playwright/TestPlayWrightFetcher.test_infinite_timeout
|
Modified
|
D4Vinci~Scrapling
|
de2d65856c2054cb6e0eab608a8d13bee2771b78
|
Adding playwright/camoufox tests for cookies and improve the timeout test
|
<1>:<add> self.assertEqual(self.fetcher.fetch(self.delayed_url, timeout=None).status, 200)
<del> self.assertEqual(self.fetcher.fetch(self.html_url, timeout=None).status, 200)
|
# module: tests.fetchers.test_playwright
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestPlayWrightFetcher(unittest.TestCase):
def test_infinite_timeout(self):
<0> """Test if infinite timeout breaks the code or not"""
<1> self.assertEqual(self.fetcher.fetch(self.html_url, timeout=None).status, 200)
<2>
|
===========changed ref 0===========
# module: tests.fetchers.test_playwright
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestPlayWrightFetcher(unittest.TestCase):
+ def test_cookies_loading(self):
+ """Test if cookies are set after the request"""
+ self.assertEqual(self.fetcher.fetch(self.cookies_url).cookies, {'test': 'value'})
+
===========changed ref 1===========
# module: tests.fetchers.test_playwright
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestPlayWrightFetcher(unittest.TestCase):
def setUp(self):
self.fetcher = PlayWrightFetcher(auto_match=False)
url = self.httpbin.url
self.status_200 = f'{url}/status/200'
self.status_404 = f'{url}/status/404'
self.status_501 = f'{url}/status/501'
self.basic_url = f'{url}/get'
self.html_url = f'{url}/html'
+ self.delayed_url = f'{url}/delay/10' # 10 Seconds delay response
+ self.cookies_url = f"{url}/cookies/set/test/value"
===========changed ref 2===========
# module: tests.fetchers.test_camoufox
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestStealthyFetcher(unittest.TestCase):
+ def test_cookies_loading(self):
+ """Test if cookies are set after the request"""
+ self.assertEqual(self.fetcher.fetch(self.cookies_url).cookies, {'test': 'value'})
+
===========changed ref 3===========
# module: tests.fetchers.test_camoufox
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestStealthyFetcher(unittest.TestCase):
def test_infinite_timeout(self):
"""Test if infinite timeout breaks the code or not"""
+ self.assertEqual(self.fetcher.fetch(self.delayed_url, timeout=None).status, 200)
- self.assertEqual(self.fetcher.fetch(self.html_url, timeout=None).status, 200)
===========changed ref 4===========
# module: tests.fetchers.test_camoufox
@pytest_httpbin.use_class_based_httpbin
# @pytest_httpbin.use_class_based_httpbin_secure
class TestStealthyFetcher(unittest.TestCase):
def setUp(self):
self.fetcher = StealthyFetcher(auto_match=False)
url = self.httpbin.url
self.status_200 = f'{url}/status/200'
self.status_404 = f'{url}/status/404'
self.status_501 = f'{url}/status/501'
self.basic_url = f'{url}/get'
self.html_url = f'{url}/html'
+ self.delayed_url = f'{url}/delay/10' # 10 Seconds delay response
+ self.cookies_url = f"{url}/cookies/set/test/value"
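Read together, the new endpoints above are easiest to follow as one small test; a sketch that assumes pytest-httpbin's function-scoped `httpbin` fixture instead of the class-based variant used in the suite:

from scrapling import StealthyFetcher  # assumed public import

def test_cookies_and_timeout(httpbin):
    fetcher = StealthyFetcher(auto_match=False)
    # Cookies set by the server should surface on the Response object
    assert fetcher.fetch(f"{httpbin.url}/cookies/set/test/value").cookies == {'test': 'value'}
    # timeout=None disables the navigation timeout, so the 10-second delayed page can still return 200
    assert fetcher.fetch(f"{httpbin.url}/delay/10", timeout=None).status == 200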
|
scrapling.core.mixins/SelectorsGeneration.__general_selection
|
Modified
|
D4Vinci~Scrapling
|
3d4b427168b7a984a3b7c523306767823c9fae0e
|
Rework Selectors generation
|
<15>:<add> if not full_path:
<add> return (
<del> return (
<16>:<add> " > ".join(reversed(selectorPath)) if css
<del> " > ".join(reversed(selectorPath)) if css
<17>:<add> else '//*' + "/".join(reversed(selectorPath))
<del> else '//*' + "/".join(reversed(selectorPath))
<18>:<add> )
<del> )
|
# module: scrapling.core.mixins
class SelectorsGeneration:
+ def __general_selection(self, selection: str = 'css', full_path=False) -> str:
- def __general_selection(self, selection: str = 'css') -> str:
<0> """Generate a selector for the current element.
<1> :return: A string of the generated selector.
<2> """
<3> selectorPath = []
<4> target = self
<5> css = selection.lower() == 'css'
<6> while target is not None:
<7> if target.parent:
<8> if target.attrib.get('id'):
<9> # id is enough
<10> part = (
<11> f'#{target.attrib["id"]}' if css
<12> else f"[@id='{target.attrib['id']}']"
<13> )
<14> selectorPath.append(part)
<15> return (
<16> " > ".join(reversed(selectorPath)) if css
<17> else '//*' + "/".join(reversed(selectorPath))
<18> )
<19> else:
<20> part = f'{target.tag}'
<21>                 # We won't use classes anymore because some websites share exact classes between elements
<22> # classes = target.attrib.get('class', '').split()
<23> # if classes and css:
<24> # part += f".{'.'.join(classes)}"
<25> # else:
<26> counter = {}
<27> for child in target.parent.children:
<28> counter.setdefault(child.tag, 0)
<29> counter[child.tag] += 1
<30> if child._root == target._root:
<31> break
<32>
<33> if counter[target.tag] > 1:
<34> part += (
<35> f":nth-of-type({counter[target.tag]})" if css
<36> else f"[{counter[target.tag]}]"
<37> )
<38>
<39> selectorPath.append(part)
<40> target = target.parent
<41> if target is None or target.tag == 'html':
<42> return (
<43> " > ".join(reversed(selectorPath</s>
|
===========below chunk 0===========
# module: scrapling.core.mixins
class SelectorsGeneration:
+ def __general_selection(self, selection: str = 'css', full_path=False) -> str:
- def __general_selection(self, selection: str = 'css') -> str:
# offset: 1
else '//' + "/".join(reversed(selectorPath))
)
else:
break
return (
" > ".join(reversed(selectorPath)) if css
else '//' + "/".join(reversed(selectorPath))
)
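Concretely, the walk above climbs from the element toward <html>, short-circuiting on an id (unless a full path is requested) and otherwise emitting the tag plus an :nth-of-type()/[n] disambiguator. A small illustration of the selectors this should produce; the Adaptor import path and css() helper are assumed from the public API:

from scrapling import Adaptor  # assumed public import

html = '<html><body><div><p>first</p><p>second</p></div></body></html>'
page = Adaptor(text=html, auto_match=False)
second = page.css('p')[-1]

print(second.generate_css_selector)     # e.g. 'body > div > p:nth-of-type(2)'
print(second.generate_xpath_selector)   # e.g. '//body/div/p[2]'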
|
tests.parser.test_general/TestParser.test_selectors_generation
|
Modified
|
D4Vinci~Scrapling
|
3d4b427168b7a984a3b7c523306767823c9fae0e
|
Rework Selectors generation
|
<2>:<add> self.assertTrue(type(element.generate_css_selector) is str)
<del> self.assertTrue(type(element.css_selector) is str)
<3>:<add> self.assertTrue(type(element.generate_xpath_selector) is str)
<del> self.assertTrue(type(element.xpath_selector) is str)
|
# module: tests.parser.test_general
class TestParser(unittest.TestCase):
def test_selectors_generation(self):
<0> """Try to create selectors for all elements in the page"""
<1> def _traverse(element: Adaptor):
<2> self.assertTrue(type(element.css_selector) is str)
<3> self.assertTrue(type(element.xpath_selector) is str)
<4> for branch in element.children:
<5> _traverse(branch)
<6>
<7> _traverse(self.page)
<8>
|
===========changed ref 0===========
# module: scrapling.core.mixins
class SelectorsGeneration:
+ @property
+ def generate_css_selector(self) -> str:
+ """Generate a CSS selector for the current element
+ :return: A string of the generated selector.
+ """
+ return self.__general_selection()
+
===========changed ref 1===========
# module: scrapling.core.mixins
class SelectorsGeneration:
+ @property
+ def generate_xpath_selector(self) -> str:
+ """Generate a XPath selector for the current element
+ :return: A string of the generated selector.
+ """
+ return self.__general_selection('xpath')
+
===========changed ref 2===========
# module: scrapling.core.mixins
class SelectorsGeneration:
- @property
- def css_selector(self) -> str:
- """Generate a CSS selector for the current element
- :return: A string of the generated selector.
- """
- return self.__general_selection()
-
===========changed ref 3===========
# module: scrapling.core.mixins
class SelectorsGeneration:
- @property
- def xpath_selector(self) -> str:
- """Generate a XPath selector for the current element
- :return: A string of the generated selector.
- """
- return self.__general_selection('xpath')
-
===========changed ref 4===========
# module: scrapling.core.mixins
class SelectorsGeneration:
+ @property
+ def generate_full_css_selector(self) -> str:
+ """Generate a complete CSS selector for the current element
+ :return: A string of the generated selector.
+ """
+ return self.__general_selection(full_path=True)
+
===========changed ref 5===========
# module: scrapling.core.mixins
class SelectorsGeneration:
+ @property
+ def generate_full_xpath_selector(self) -> str:
+ """Generate a complete XPath selector for the current element
+ :return: A string of the generated selector.
+ """
+ return self.__general_selection('xpath', full_path=True)
+
===========changed ref 6===========
# module: scrapling.core.mixins
class SelectorsGeneration:
+ def __general_selection(self, selection: str = 'css', full_path=False) -> str:
- def __general_selection(self, selection: str = 'css') -> str:
"""Generate a selector for the current element.
:return: A string of the generated selector.
"""
selectorPath = []
target = self
css = selection.lower() == 'css'
while target is not None:
if target.parent:
if target.attrib.get('id'):
# id is enough
part = (
f'#{target.attrib["id"]}' if css
else f"[@id='{target.attrib['id']}']"
)
selectorPath.append(part)
+ if not full_path:
+ return (
- return (
+ " > ".join(reversed(selectorPath)) if css
- " > ".join(reversed(selectorPath)) if css
+ else '//*' + "/".join(reversed(selectorPath))
- else '//*' + "/".join(reversed(selectorPath))
+ )
- )
else:
part = f'{target.tag}'
# We won't use classes anymore because I some websites share exact classes between elements
# classes = target.attrib.get('class', '').split()
# if classes and css:
# part += f".{'.'.join(classes)}"
# else:
counter = {}
for child in target.parent.children:
counter.setdefault(child.tag, 0)
counter[child.tag] += 1
if child._root == target._root:
break
if counter[target.tag] > 1:
part += (
f":nth-of-type({counter[target.tag]})" if css
else f"[{counter[target.tag]}]"
)
selectorPath.append(part)
target = target.parent
if target is None or target.tag == 'html':
return (
</s>
===========changed ref 7===========
# module: scrapling.core.mixins
class SelectorsGeneration:
+ def __general_selection(self, selection: str = 'css', full_path=False) -> str:
- def __general_selection(self, selection: str = 'css') -> str:
# offset: 1
<s>append(part)
target = target.parent
if target is None or target.tag == 'html':
return (
" > ".join(reversed(selectorPath)) if css
else '//' + "/".join(reversed(selectorPath))
)
else:
break
return (
" > ".join(reversed(selectorPath)) if css
else '//' + "/".join(reversed(selectorPath))
)
|
scrapling.parser/Adaptor.__init__
|
Modified
|
D4Vinci~Scrapling
|
81f4e18a8658953665654bb62b0fb98dd2164698
|
Merge remote-tracking branch 'origin/main' into dev
|
<s> Optional[str] = None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
):
<0> """The main class that works as a wrapper for the HTML input data. Using this class, you can search for elements
<1> with expressions in CSS, XPath, or with simply text. Check the docs for more info.
<2>
<3> Here we try to extend module ``lxml.html.HtmlElement`` while maintaining a simpler interface, We are not
<4> inheriting from the ``lxml.html.HtmlElement`` because it's not pickleable which makes a lot of reference jobs
<5> not possible. You can test it here and see code explodes with `AssertionError: invalid Element proxy at...`.
<6> It's an old issue with lxml, see `this entry <https://bugs.launchpad.net/lxml/+bug/736708>`
<7>
<8> :param text: HTML body passed as text.
<9> :param url: allows storing a URL with the html data for retrieving later.
<10> :param body: HTML body as ``bytes`` object. It can be used instead of the ``text`` argument.
<11> :param encoding: The encoding type that will be used in HTML parsing, default is `UTF-8`
<12> :param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls
<13> libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion.
<14> :param root: Used internally to pass etree objects instead of text/body arguments, it takes highest priority.
<15> Don't use it unless you know what you are doing!
<16> :param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons
<17> :param auto_match: Glob</s>
|
===========below chunk 0===========
<s> None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
):
# offset: 1
priority over all auto-match related arguments/functions in the class.
:param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info.
:param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class.
If empty, default values will be used.
:param debug: Enable debug mode
"""
if root is None and not body and text is None:
raise ValueError("Adaptor class needs text, body, or root arguments to work")
if root is None:
if text is None:
if not body or not isinstance(body, bytes):
raise TypeError(f"body argument must be valid and of type bytes, got {body.__class__}")
body = body.replace(b"\x00", b"").strip()
else:
if not isinstance(text, str):
raise TypeError(f"text argument must be of type str, got {text.__class__}")
body = text.strip().replace("\x00", "").encode(encoding) or b"<html/>"
parser = html.HTMLParser(
# https://lxml.de/api/lxml.etree.HTMLParser-class.html
recover=True, remove_blank_text=True, remove_comments=(keep_comments is False), encoding=encoding,
compact=True, huge_tree=huge_tree, default_doctype=True
)
self._root = etree.fromstring(body, parser=parser, base_url=url)
else:
# All html types inherits from HtmlMixin so this</s>
===========below chunk 1===========
<s> None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
):
# offset: 2
<s>(body, parser=parser, base_url=url)
else:
# All html types inherits from HtmlMixin so this to check for all at once
if not issubclass(type(root), html.HtmlMixin):
raise TypeError(
f"Root have to be a valid element of `html` module types to work, not of type {type(root)}"
)
self._root = root
setup_basic_logging(level='debug' if debug else 'info')
self.__auto_match_enabled = auto_match
if self.__auto_match_enabled:
if not storage_args:
storage_args = {
'storage_file': os.path.join(os.path.dirname(__file__), 'elements_storage.db'),
'url': url
}
if not hasattr(storage, '__wrapped__'):
raise ValueError("Storage class must be wrapped with cache decorator, see docs for info")
if not issubclass(storage.__wrapped__, StorageSystemMixin):
raise ValueError("Storage system must be inherited from class `StorageSystemMixin`")
self._storage = storage(**storage_args)
self.__keep_comments = keep_comments
self.__huge_tree_enabled = huge_tree
self.encoding = encoding
self.url = url
# For selector stuff
self.__attributes = None
self.__text = None
self.__tag = None
self.__debug = debug
===========unchanged ref 0===========
at: functools._lru_cache_wrapper
__wrapped__: Callable[..., _T]
at: os.path
join(a: StrPath, *paths: StrPath) -> str
join(a: BytesPath, *paths: BytesPath) -> bytes
dirname(p: _PathLike[AnyStr]) -> AnyStr
dirname(p: AnyStr) -> AnyStr
at: scrapling.core.custom_types
TextHandler(o: object=...)
TextHandler(o: bytes, encoding: str=..., errors: str=...)
at: scrapling.core.storage_adaptors
StorageSystemMixin(url: Union[str, None]=None)
SQLiteStorageSystem(storage_file: str, url: Union[str, None]=None)
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: scrapling.core.utils
is_jsonable(content: Union[bytes, str]) -> bool
setup_basic_logging(level: str='debug')
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: scrapling.parser.Adaptor.attrib
self.__attributes = AttributesHandler(self._root.attrib)
at: scrapling.parser.Adaptor.text
self.__text = TextHandler(self._root.text)
self.__text = TextHandler(fragment_root.text)
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
# module: scrapling.core.utils
+ def is_jsonable(content: Union[bytes, str]) -> bool:
+ if type(content) is bytes:
+ content = content.decode()
+
+ try:
+ _ = orjson.loads(content)
+ return True
+ except orjson.JSONDecodeError:
+ return False
+
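The is_jsonable helper added here is small enough to sanity-check on its own; the snippet below simply replays the function from the diff with a couple of example inputs:

from typing import Union

import orjson

def is_jsonable(content: Union[bytes, str]) -> bool:
    if type(content) is bytes:
        content = content.decode()
    try:
        _ = orjson.loads(content)
        return True
    except orjson.JSONDecodeError:
        return False

assert is_jsonable(b'{"status": "ok"}') is True
assert is_jsonable('<html><body>not json</body></html>') is False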
|
|
scrapling.engines.static/StaticEngine.get
|
Modified
|
D4Vinci~Scrapling
|
81f4e18a8658953665654bb62b0fb98dd2164698
|
Merge remote-tracking branch 'origin/main' into dev
|
<7>:<add> headers = self._headers_job(kwargs.pop('headers', {}), url, stealthy_headers)
<del> headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
|
# module: scrapling.engines.static
class StaticEngine:
def get(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response:
<0> """Make basic HTTP GET request for you but with some added flavors.
<1> :param url: Target url.
<2> :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
<3> create a referer header as if this request had came from Google's search of this URL's domain.
<4> :param kwargs: Any additional keyword arguments are passed directly to `httpx.get()` function so check httpx documentation for details.
<5> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<6> """
<7> headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
<8> request = httpx.get(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
<9> return self._prepare_response(request)
<10>
|
===========changed ref 0===========
# module: scrapling.core.utils
+ def is_jsonable(content: Union[bytes, str]) -> bool:
+ if type(content) is bytes:
+ content = content.decode()
+
+ try:
+ _ = orjson.loads(content)
+ return True
+ except orjson.JSONDecodeError:
+ return False
+
===========changed ref 1===========
<s> Optional[str] = None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
):
"""The main class that works as a wrapper for the HTML input data. Using this class, you can search for elements
with expressions in CSS, XPath, or with simply text. Check the docs for more info.
Here we try to extend module ``lxml.html.HtmlElement`` while maintaining a simpler interface, We are not
inheriting from the ``lxml.html.HtmlElement`` because it's not pickleable which makes a lot of reference jobs
not possible. You can test it here and see code explodes with `AssertionError: invalid Element proxy at...`.
It's an old issue with lxml, see `this entry <https://bugs.launchpad.net/lxml/+bug/736708>`
:param text: HTML body passed as text.
:param url: allows storing a URL with the html data for retrieving later.
:param body: HTML body as ``bytes`` object. It can be used instead of the ``text`` argument.
:param encoding: The encoding type that will be used in HTML parsing, default is `UTF-8`
:param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls
libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion.
:param root: Used internally to pass etree objects instead of text/body arguments, it takes highest priority.
Don't use it unless you know what you are doing!
:param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons
:param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
</s>
===========changed ref 2===========
<s> None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
):
# offset: 1
<s> for obvious reasons
:param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
priority over all auto-match related arguments/functions in the class.
:param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info.
:param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class.
If empty, default values will be used.
:param debug: Enable debug mode
"""
if root is None and not body and text is None:
raise ValueError("Adaptor class needs text, body, or root arguments to work")
+ self.__text = None
if root is None:
if text is None:
if not body or not isinstance(body, bytes):
raise TypeError(f"body argument must be valid and of type bytes, got {body.__class__}")
body = body.replace(b"\x00", b"").strip()
else:
if not isinstance(text, str):
raise TypeError(f"text argument must be of type str, got {text.__class__}")
body = text.strip().replace("\x00", "").encode(encoding) or b"<html/>"
+ # https://lxml.de/api/lxml.etree.HTMLParser-class.html
parser = html.HTMLParser(
- # https://lxml.de/api/lxml.etree.HTMLParser-</s>
===========changed ref 3===========
<s> None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
):
# offset: 2
<s>html
recover=True, remove_blank_text=True, remove_comments=(keep_comments is False), encoding=encoding,
compact=True, huge_tree=huge_tree, default_doctype=True
)
self._root = etree.fromstring(body, parser=parser, base_url=url)
+ if is_jsonable(text or body.decode()):
+ self.__text = TextHandler(text or body.decode())
else:
# All html types inherits from HtmlMixin so this to check for all at once
if not issubclass(type(root), html.HtmlMixin):
raise TypeError(
f"Root have to be a valid element of `html` module types to work, not of type {type(root)}"
)
self._root = root
setup_basic_logging(level='debug' if debug else 'info')
self.__auto_match_enabled = auto_match
if self.__auto_match_enabled:
if not storage_args:
storage_args = {
'storage_file': os.path.join(os.path.dirname(__file__), 'elements_storage.db'),
'url': url
}
if not hasattr(storage, '__wrapped__'):
raise ValueError("Storage class must be wrapped with cache decorator, see docs for info")
if not issubclass(storage.__wrapped__, StorageSystemMixin):
raise ValueError("Storage system must be inherited from class `</s>
===========changed ref 4===========
<s> None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
):
# offset: 3
<s>Mixin`")
self._storage = storage(**storage_args)
self.__keep_comments = keep_comments
self.__huge_tree_enabled = huge_tree
self.encoding = encoding
self.url = url
# For selector stuff
self.__attributes = None
- self.__text = None
self.__tag = None
self.__debug = debug
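The switch from kwargs.get to kwargs.pop in this commit matters because the engine also forwards **kwargs straight to httpx: if 'headers' stayed inside kwargs, httpx.get() would receive that keyword twice. A tiny illustration of the failure mode, independent of httpx:

def call(url, **kwargs):
    headers = kwargs.pop('headers', {}) or {}   # consume it so it is not forwarded twice
    headers.setdefault('User-Agent', 'example-ua')
    # With kwargs.get(...) instead of pop(...), 'headers' would still be in kwargs and the call
    # below would raise TypeError: got multiple values for keyword argument 'headers'
    return dict(url=url, headers=headers, **kwargs)

print(call('https://example.com', headers={'X-Test': '1'}, params={'q': 'x'}))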
|
scrapling.engines.static/StaticEngine.post
|
Modified
|
D4Vinci~Scrapling
|
81f4e18a8658953665654bb62b0fb98dd2164698
|
Merge remote-tracking branch 'origin/main' into dev
|
<7>:<add> headers = self._headers_job(kwargs.pop('headers', {}), url, stealthy_headers)
<del> headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
|
# module: scrapling.engines.static
class StaticEngine:
def post(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response:
<0> """Make basic HTTP POST request for you but with some added flavors.
<1> :param url: Target url.
<2> :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
<3> create a referer header as if this request had came from Google's search of this URL's domain.
<4> :param kwargs: Any additional keyword arguments are passed directly to `httpx.post()` function so check httpx documentation for details.
<5> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<6> """
<7> headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
<8> request = httpx.post(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
<9> return self._prepare_response(request)
<10>
|
===========changed ref 0===========
# module: scrapling.engines.static
class StaticEngine:
def get(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response:
"""Make basic HTTP GET request for you but with some added flavors.
:param url: Target url.
:param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
create a referer header as if this request had came from Google's search of this URL's domain.
:param kwargs: Any additional keyword arguments are passed directly to `httpx.get()` function so check httpx documentation for details.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
+ headers = self._headers_job(kwargs.pop('headers', {}), url, stealthy_headers)
- headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
request = httpx.get(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
return self._prepare_response(request)
===========changed ref 1===========
# module: scrapling.core.utils
+ def is_jsonable(content: Union[bytes, str]) -> bool:
+ if type(content) is bytes:
+ content = content.decode()
+
+ try:
+ _ = orjson.loads(content)
+ return True
+ except orjson.JSONDecodeError:
+ return False
+
===========changed ref 2===========
<s> Optional[str] = None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
):
"""The main class that works as a wrapper for the HTML input data. Using this class, you can search for elements
with expressions in CSS, XPath, or with simply text. Check the docs for more info.
Here we try to extend module ``lxml.html.HtmlElement`` while maintaining a simpler interface, We are not
inheriting from the ``lxml.html.HtmlElement`` because it's not pickleable which makes a lot of reference jobs
not possible. You can test it here and see code explodes with `AssertionError: invalid Element proxy at...`.
It's an old issue with lxml, see `this entry <https://bugs.launchpad.net/lxml/+bug/736708>`
:param text: HTML body passed as text.
:param url: allows storing a URL with the html data for retrieving later.
:param body: HTML body as ``bytes`` object. It can be used instead of the ``text`` argument.
:param encoding: The encoding type that will be used in HTML parsing, default is `UTF-8`
:param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls
libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion.
:param root: Used internally to pass etree objects instead of text/body arguments, it takes highest priority.
Don't use it unless you know what you are doing!
:param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons
:param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
</s>
===========changed ref 3===========
<s> None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
):
# offset: 1
<s> for obvious reasons
:param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
priority over all auto-match related arguments/functions in the class.
:param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info.
:param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class.
If empty, default values will be used.
:param debug: Enable debug mode
"""
if root is None and not body and text is None:
raise ValueError("Adaptor class needs text, body, or root arguments to work")
+ self.__text = None
if root is None:
if text is None:
if not body or not isinstance(body, bytes):
raise TypeError(f"body argument must be valid and of type bytes, got {body.__class__}")
body = body.replace(b"\x00", b"").strip()
else:
if not isinstance(text, str):
raise TypeError(f"text argument must be of type str, got {text.__class__}")
body = text.strip().replace("\x00", "").encode(encoding) or b"<html/>"
+ # https://lxml.de/api/lxml.etree.HTMLParser-class.html
parser = html.HTMLParser(
- # https://lxml.de/api/lxml.etree.HTMLParser-</s>
===========changed ref 4===========
<s> None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
):
# offset: 2
<s>html
recover=True, remove_blank_text=True, remove_comments=(keep_comments is False), encoding=encoding,
compact=True, huge_tree=huge_tree, default_doctype=True
)
self._root = etree.fromstring(body, parser=parser, base_url=url)
+ if is_jsonable(text or body.decode()):
+ self.__text = TextHandler(text or body.decode())
else:
# All html types inherits from HtmlMixin so this to check for all at once
if not issubclass(type(root), html.HtmlMixin):
raise TypeError(
f"Root have to be a valid element of `html` module types to work, not of type {type(root)}"
)
self._root = root
setup_basic_logging(level='debug' if debug else 'info')
self.__auto_match_enabled = auto_match
if self.__auto_match_enabled:
if not storage_args:
storage_args = {
'storage_file': os.path.join(os.path.dirname(__file__), 'elements_storage.db'),
'url': url
}
if not hasattr(storage, '__wrapped__'):
raise ValueError("Storage class must be wrapped with cache decorator, see docs for info")
if not issubclass(storage.__wrapped__, StorageSystemMixin):
raise ValueError("Storage system must be inherited from class `</s>
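For completeness, a hedged sketch of how these engine methods are reached through the high-level Fetcher; the import path and the exact public signature are assumptions, while the idea that extra keyword arguments flow through to httpx follows the docstrings above:

from scrapling import Fetcher  # assumed public import

fetcher = Fetcher(auto_match=False)
page = fetcher.get('https://httpbin.org/html', stealthy_headers=True)
print(page.status, page.encoding)

# POST works the same way; extra kwargs such as `data` are handed on to httpx.post()
result = fetcher.post('https://httpbin.org/post', data={'key': 'value'})
print(result.status)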
|
scrapling.engines.static/StaticEngine.delete
|
Modified
|
D4Vinci~Scrapling
|
81f4e18a8658953665654bb62b0fb98dd2164698
|
Merge remote-tracking branch 'origin/main' into dev
|
<7>:<add> headers = self._headers_job(kwargs.pop('headers', {}), url, stealthy_headers)
<del> headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
|
# module: scrapling.engines.static
class StaticEngine:
def delete(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response:
<0> """Make basic HTTP DELETE request for you but with some added flavors.
<1> :param url: Target url.
<2> :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
<3> create a referer header as if this request had came from Google's search of this URL's domain.
<4> :param kwargs: Any additional keyword arguments are passed directly to `httpx.delete()` function so check httpx documentation for details.
<5> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<6> """
<7> headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
<8> request = httpx.delete(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
<9> return self._prepare_response(request)
<10>
|
===========changed ref 0===========
# module: scrapling.engines.static
class StaticEngine:
def post(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response:
"""Make basic HTTP POST request for you but with some added flavors.
:param url: Target url.
:param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
create a referer header as if this request had came from Google's search of this URL's domain.
:param kwargs: Any additional keyword arguments are passed directly to `httpx.post()` function so check httpx documentation for details.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
+ headers = self._headers_job(kwargs.pop('headers', {}), url, stealthy_headers)
- headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
request = httpx.post(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
return self._prepare_response(request)
===========changed ref 1===========
# module: scrapling.engines.static
class StaticEngine:
def get(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response:
"""Make basic HTTP GET request for you but with some added flavors.
:param url: Target url.
:param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
create a referer header as if this request had came from Google's search of this URL's domain.
:param kwargs: Any additional keyword arguments are passed directly to `httpx.get()` function so check httpx documentation for details.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
+ headers = self._headers_job(kwargs.pop('headers', {}), url, stealthy_headers)
- headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
request = httpx.get(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
return self._prepare_response(request)
===========changed ref 2===========
# module: scrapling.core.utils
+ def is_jsonable(content: Union[bytes, str]) -> bool:
+ if type(content) is bytes:
+ content = content.decode()
+
+ try:
+ _ = orjson.loads(content)
+ return True
+ except orjson.JSONDecodeError:
+ return False
+
===========changed ref 3===========
<s> Optional[str] = None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
):
"""The main class that works as a wrapper for the HTML input data. Using this class, you can search for elements
with expressions in CSS, XPath, or with simply text. Check the docs for more info.
Here we try to extend module ``lxml.html.HtmlElement`` while maintaining a simpler interface, We are not
inheriting from the ``lxml.html.HtmlElement`` because it's not pickleable which makes a lot of reference jobs
not possible. You can test it here and see code explodes with `AssertionError: invalid Element proxy at...`.
It's an old issue with lxml, see `this entry <https://bugs.launchpad.net/lxml/+bug/736708>`
:param text: HTML body passed as text.
:param url: allows storing a URL with the html data for retrieving later.
:param body: HTML body as ``bytes`` object. It can be used instead of the ``text`` argument.
:param encoding: The encoding type that will be used in HTML parsing, default is `UTF-8`
:param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls
libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion.
:param root: Used internally to pass etree objects instead of text/body arguments, it takes highest priority.
Don't use it unless you know what you are doing!
:param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons
:param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
</s>
===========changed ref 4===========
<s> None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
):
# offset: 1
<s> for obvious reasons
:param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
priority over all auto-match related arguments/functions in the class.
:param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info.
:param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class.
If empty, default values will be used.
:param debug: Enable debug mode
"""
if root is None and not body and text is None:
raise ValueError("Adaptor class needs text, body, or root arguments to work")
+ self.__text = None
if root is None:
if text is None:
if not body or not isinstance(body, bytes):
raise TypeError(f"body argument must be valid and of type bytes, got {body.__class__}")
body = body.replace(b"\x00", b"").strip()
else:
if not isinstance(text, str):
raise TypeError(f"text argument must be of type str, got {text.__class__}")
body = text.strip().replace("\x00", "").encode(encoding) or b"<html/>"
+ # https://lxml.de/api/lxml.etree.HTMLParser-class.html
parser = html.HTMLParser(
- # https://lxml.de/api/lxml.etree.HTMLParser-</s>
|
scrapling.engines.static/StaticEngine.put
|
Modified
|
D4Vinci~Scrapling
|
81f4e18a8658953665654bb62b0fb98dd2164698
|
Merge remote-tracking branch 'origin/main' into dev
|
<7>:<add> headers = self._headers_job(kwargs.pop('headers', {}), url, stealthy_headers)
<del> headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
|
# module: scrapling.engines.static
class StaticEngine:
def put(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response:
<0> """Make basic HTTP PUT request for you but with some added flavors.
<1> :param url: Target url.
<2> :param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
<3> create a referer header as if this request had come from Google's search of this URL's domain.
<4> :param kwargs: Any additional keyword arguments are passed directly to `httpx.put()` function so check httpx documentation for details.
<5> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<6> """
<7> headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
<8> request = httpx.put(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
<9> return self._prepare_response(request)
<10>
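The ground truth above swaps `kwargs.get('headers')` for `kwargs.pop('headers', {})`. With `.get()`, a caller-supplied `headers` dict stays inside `**kwargs`, so `httpx.put()` would receive `headers` twice and raise a TypeError. A rough sketch of the difference (the helper names here are illustrative, not the real `_headers_job`):

import httpx

def put_with_get(url, **kwargs):
    headers = kwargs.get('headers')  # 'headers' is still inside kwargs
    return httpx.put(url=url, headers=headers, **kwargs)  # TypeError: got multiple values for keyword argument 'headers'

def put_with_pop(url, **kwargs):
    headers = kwargs.pop('headers', {})  # 'headers' is removed from kwargs
    return httpx.put(url=url, headers=headers, **kwargs)  # safe to forward the remaining kwargs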
|
===========changed ref 0===========
# module: scrapling.engines.static
class StaticEngine:
def delete(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response:
"""Make basic HTTP DELETE request for you but with some added flavors.
:param url: Target url.
:param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
create a referer header as if this request had come from Google's search of this URL's domain.
:param kwargs: Any additional keyword arguments are passed directly to `httpx.delete()` function so check httpx documentation for details.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
+ headers = self._headers_job(kwargs.pop('headers', {}), url, stealthy_headers)
- headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
request = httpx.delete(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
return self._prepare_response(request)
===========changed ref 1===========
# module: scrapling.engines.static
class StaticEngine:
def post(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response:
"""Make basic HTTP POST request for you but with some added flavors.
:param url: Target url.
:param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
create a referer header as if this request had come from Google's search of this URL's domain.
:param kwargs: Any additional keyword arguments are passed directly to `httpx.post()` function so check httpx documentation for details.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
+ headers = self._headers_job(kwargs.pop('headers', {}), url, stealthy_headers)
- headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
request = httpx.post(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
return self._prepare_response(request)
===========changed ref 2===========
# module: scrapling.engines.static
class StaticEngine:
def get(self, url: str, stealthy_headers: Optional[bool] = True, **kwargs: Dict) -> Response:
"""Make basic HTTP GET request for you but with some added flavors.
:param url: Target url.
:param stealthy_headers: If enabled (default), Fetcher will create and add real browser's headers and
create a referer header as if this request had come from Google's search of this URL's domain.
:param kwargs: Any additional keyword arguments are passed directly to `httpx.get()` function so check httpx documentation for details.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
+ headers = self._headers_job(kwargs.pop('headers', {}), url, stealthy_headers)
- headers = self._headers_job(kwargs.get('headers'), url, stealthy_headers)
request = httpx.get(url=url, headers=headers, follow_redirects=self.follow_redirects, timeout=self.timeout, **kwargs)
return self._prepare_response(request)
===========changed ref 3===========
# module: scrapling.core.utils
+ def is_jsonable(content: Union[bytes, str]) -> bool:
+ if type(content) is bytes:
+ content = content.decode()
+
+ try:
+ _ = orjson.loads(content)
+ return True
+ except orjson.JSONDecodeError:
+ return False
+
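changed ref 3 adds `is_jsonable`, which simply reports whether a payload parses as JSON with orjson. Expected behaviour, assuming orjson is installed:

from scrapling.core.utils import is_jsonable

is_jsonable('{"price": 10}')   # True
is_jsonable(b'{"price": 10}')  # True, bytes are decoded first
is_jsonable('not json')        # False, the orjson.JSONDecodeError is swallowed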
===========changed ref 4===========
<s> Optional[str] = None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
):
"""The main class that works as a wrapper for the HTML input data. Using this class, you can search for elements
with expressions in CSS, XPath, or simply with text. Check the docs for more info.
Here we try to extend the ``lxml.html.HtmlElement`` module while maintaining a simpler interface. We are not
inheriting from ``lxml.html.HtmlElement`` because it's not pickleable, which makes a lot of reference jobs
impossible. You can test it here and see the code explode with `AssertionError: invalid Element proxy at...`.
It's an old issue with lxml, see `this entry <https://bugs.launchpad.net/lxml/+bug/736708>`
:param text: HTML body passed as text.
:param url: allows storing a URL with the html data for retrieving later.
:param body: HTML body as ``bytes`` object. It can be used instead of the ``text`` argument.
:param encoding: The encoding type that will be used in HTML parsing, default is `UTF-8`
:param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls
libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion.
:param root: Used internally to pass etree objects instead of text/body arguments, it takes highest priority.
Don't use it unless you know what you are doing!
:param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons
:param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
</s>
|
scrapling.parser/Adaptor.text
|
Modified
|
D4Vinci~Scrapling
|
b0c2b1818b9abcc31eec86c630de6be1e4e428fa
|
Return to the default text behaviour
|
<2>:<del> if self.__keep_comments:
<3>:<del> if not self.children:
<4>:<del> # If user chose to keep comments, remove comments from text
<5>:<add> # If you want to escape lxml default behaviour and remove comments like this `<span>CONDITION: <!-- -->Excellent</span>`
<del> # Escape lxml default behaviour and remove comments like this `<span>CONDITION: <!-- -->Excellent</span>`
<6>:<del> # This issue is present in parsel/scrapy as well so no need to repeat it here so the user can run regex on the full text.
<7>:<del> code = self.html_content
<8>:<del> parser = html.HTMLParser(
<9>:<del> recover=True, remove_blank_text=True, remove_comments=True, encoding=self.encoding,
<10>:<del> compact=True, huge_tree=self.__huge_tree_enabled, default_doctype=True
<11>:<del> )
<12>:<del> fragment_root = html.fragment_fromstring(code, parser=parser)
<13>:<del> self.__text = TextHandler(fragment_root.text)
<14>:<del> else:
<15>:<del> self.__text = TextHandler(self._root.text)
<16>:<del> else:
<17>:<del> # If user already chose to not keep comments then all is good
<18>:<add> # before extracting text then keep `keep_comments` set to False while initializing the first class
<add> self.__text = TextHandler(self._root.text)
<del> self.__text = TextHandler(self._root.text)
|
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
@property
def text(self) -> TextHandler:
<0> """Get text content of the element"""
<1> if not self.__text:
<2> if self.__keep_comments:
<3> if not self.children:
<4> # If user chose to keep comments, remove comments from text
<5> # Escape lxml default behaviour and remove comments like this `<span>CONDITION: <!-- -->Excellent</span>`
<6> # This issue is present in parsel/scrapy as well so no need to repeat it here so the user can run regex on the full text.
<7> code = self.html_content
<8> parser = html.HTMLParser(
<9> recover=True, remove_blank_text=True, remove_comments=True, encoding=self.encoding,
<10> compact=True, huge_tree=self.__huge_tree_enabled, default_doctype=True
<11> )
<12> fragment_root = html.fragment_fromstring(code, parser=parser)
<13> self.__text = TextHandler(fragment_root.text)
<14> else:
<15> self.__text = TextHandler(self._root.text)
<16> else:
<17> # If user already chose to not keep comments then all is good
<18> self.__text = TextHandler(self._root.text)
<19> return self.__text
<20>
|
===========unchanged ref 0===========
at: scrapling.core.custom_types
TextHandler(o: object=...)
TextHandler(o: bytes, encoding: str=..., errors: str=...)
at: scrapling.parser.Adaptor
__slots__ = (
'url', 'encoding', '__auto_match_enabled', '_root', '_storage', '__debug',
'__keep_comments', '__huge_tree_enabled', '__attributes', '__text', '__tag',
)
body = html_content
at: scrapling.parser.Adaptor.__init__
self.__text = None
self.__text = TextHandler(text or body.decode())
self._root = root
self._root = etree.fromstring(body, parser=parser, base_url=url)
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
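After this commit, `.text` mirrors lxml's default behaviour: whether the comment in `<span>CONDITION: <!-- -->Excellent</span>` splits the text is decided at parse time by `keep_comments`, not inside the property. A hedged sketch of the resulting behaviour (`css_first` is assumed as the selection helper):

from scrapling.parser import Adaptor

doc = "<span>CONDITION: <!-- -->Excellent</span>"

# keep_comments=False (the default) drops the comment while parsing, so the text is contiguous
page = Adaptor(text=doc, auto_match=False)
page.css_first('span').text   # expected: 'CONDITION: Excellent'

# keep_comments=True keeps the comment node, and .text only returns the part before it
page = Adaptor(text=doc, keep_comments=True, auto_match=False)
page.css_first('span').text   # expected: 'CONDITION: '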
|
scrapling.engines.pw/PlaywrightEngine.__init__
|
Modified
|
D4Vinci~Scrapling
|
2abb70218f46e9889cafb13cf6d0150aa8711793
|
Adding the proxy support to browser-based Fetchers
|
<s>
hide_canvas: bool = True,
disable_webgl: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False,
nstbrowser_config: Optional[Dict] = None,
google_search: Optional[bool] = True,
extra_headers: Optional[Dict[str, str]] = None,
+ proxy: Optional[Union[str, Dict[str, str]]] = None,
adaptor_arguments: Dict = None
):
<0> """An engine that utilizes PlayWright library, check the `PlayWrightFetcher` class for more documentation.
<1>
<2> :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
<3> :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<4> Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
<5> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<6> :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
<7> :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
<8> :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000
<9> :param page_action: Added for automation. A function that takes the `page` object, does the automation you need, then returns `page` again.
<10> :param wait_selector: Wait for a specific css selector to be in a specific state.
<11> :param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
<12> :param stealth: Enables stealth mode, check the documentation to see what stealth mode does currently.
<13> :param hide_canvas: Add random noise to canvas operations to prevent fingerprinting.
<14> </s>
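The new `proxy` parameter added to the signature above accepts either a proxy URL string or a Playwright-style dict; it is normalized by `construct_proxy_dict` from `scrapling.engines.toolbelt.navigation` (shown later in this commit's records). A hedged sketch of the two accepted forms (the address is a placeholder):

from scrapling.engines.toolbelt.navigation import construct_proxy_dict

construct_proxy_dict("http://user:pass@127.0.0.1:8080")
# -> {'server': 'http://127.0.0.1:8080', 'username': 'user', 'password': 'pass'}

construct_proxy_dict({'server': 'http://127.0.0.1:8080', 'username': 'user', 'password': 'pass'})
# -> returned as-is; any key outside server/username/password raises TypeError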
|
===========below chunk 0===========
<s>: bool = True,
disable_webgl: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False,
nstbrowser_config: Optional[Dict] = None,
google_search: Optional[bool] = True,
extra_headers: Optional[Dict[str, str]] = None,
+ proxy: Optional[Union[str, Dict[str, str]]] = None,
adaptor_arguments: Dict = None
):
# offset: 1
:param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers/NSTBrowser through CDP.
:param nstbrowser_mode: Enables NSTBrowser mode; it has to be used with the `cdp_url` argument or it will get completely ignored.
:param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
:param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
:param nstbrowser_config: The config you want to send with requests to the NSTBrowser. If left empty, Scrapling defaults to an optimized NSTBrowser's docker browserless config.
:param adaptor_arguments: The arguments that will be passed in the end while creating the final Adaptor's class.
"""
self.headless = headless
self.disable_resources = disable_resources
self.network_idle = bool(network_idle)
self.stealth = bool(stealth)
self.hide_canvas = bool(hide_canvas)
self.disable_webgl = bool(disable_webgl)
self.google_search = bool(google_search)
self.extra_headers = extra_headers or {}
self.cdp_url = cdp_url
self.useragent = useragent
self.timeout = check_type_validity(timeout, [int, float], 30000)
if callable(page_</s>
===========below chunk 1===========
<s>: bool = True,
disable_webgl: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False,
nstbrowser_config: Optional[Dict] = None,
google_search: Optional[bool] = True,
extra_headers: Optional[Dict[str, str]] = None,
+ proxy: Optional[Union[str, Dict[str, str]]] = None,
adaptor_arguments: Dict = None
):
# offset: 2
<s>
self.timeout = check_type_validity(timeout, [int, float], 30000)
if callable(page_action):
self.page_action = page_action
else:
self.page_action = do_nothing
logging.error('[Ignored] Argument "page_action" must be callable')
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
self.nstbrowser_mode = bool(nstbrowser_mode)
self.nstbrowser_config = nstbrowser_config
self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
===========unchanged ref 0===========
at: logging
error(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: scrapling.engines.toolbelt.custom
check_type_validity(variable: Any, valid_types: Union[List[Type], None], default_value: Any=None, critical: bool=False, param_name: Optional[str]=None) -> Any
do_nothing(page)
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
Dict = _alias(dict, 2, inst=False, name='Dict')
|
|
scrapling.engines.pw/PlaywrightEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
2abb70218f46e9889cafb13cf6d0150aa8711793
|
Adding the proxy support to browser-based Fetchers
|
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<4> """
<5> if not self.stealth:
<6> from playwright.sync_api import sync_playwright
<7> else:
<8> from rebrowser_playwright.sync_api import sync_playwright
<9>
<10> with sync_playwright() as p:
<11> # Handle the UserAgent early
<12> if self.useragent:
<13> extra_headers = {}
<14> useragent = self.useragent
<15> else:
<16> extra_headers = generate_headers(browser_mode=True)
<17> useragent = extra_headers.get('User-Agent')
<18>
<19> # Prepare the flags before diving
<20> flags = DEFAULT_STEALTH_FLAGS
<21> if self.hide_canvas:
<22> flags += ['--fingerprinting-canvas-image-data-noise']
<23> if self.disable_webgl:
<24> flags += ['--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2']
<25>
<26> # Creating the browser
<27> if self.cdp_url:
<28> cdp_url = self._cdp_url_logic(flags if self.stealth else None)
<29> browser = p.chromium.connect_over_cdp(endpoint_url=cdp_url)
<30> else:
<31> if self.stealth:
<32> browser = p.chromium.launch(headless=self.headless, args=flags, ignore_default_args=['--enable-automation'], chromium_sandbox=True)
<33> else:
<34> browser = p.chromium.launch(headless=self.headless, ignore_</s>
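As the options above describe, the engine either launches its own Chromium (optionally with the stealth flags) or, when `cdp_url` is given, attaches to an already-running browser/NSTBrowser over CDP. A hedged sketch of both modes (the CDP endpoint is a placeholder):

from scrapling.engines.pw import PlaywrightEngine

local = PlaywrightEngine(stealth=True)  # launches Chromium itself with the stealth flags
remote = PlaywrightEngine(cdp_url="ws://127.0.0.1:9222/devtools/browser/<id>")  # attaches over CDP instead
page = local.fetch("https://example.com")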
|
===========below chunk 0===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 1
# Creating the context
if self.stealth:
context = browser.new_context(
locale='en-US',
is_mobile=False,
has_touch=False,
color_scheme='dark', # Bypasses the 'prefersLightColor' check in creepjs
user_agent=useragent,
device_scale_factor=2,
# I'm thinking about disabling it to rest from all Service Workers headache but let's keep it as it is for now
service_workers="allow",
ignore_https_errors=True,
extra_http_headers=extra_headers,
screen={"width": 1920, "height": 1080},
viewport={"width": 1920, "height": 1080},
permissions=["geolocation", 'notifications'],
)
else:
context = browser.new_context(
color_scheme='dark',
user_agent=useragent,
device_scale_factor=2,
extra_http_headers=extra_headers
)
# Finally we are in business
page = context.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.extra_headers:
page.set_extra_http_headers(self.extra_headers)
if self.disable_resources:
page.route("**/*", intercept_route)
if self.stealth:
# Basic bypasses nothing fancy as I'm still working on it
# But with adding these bypasses to the above config, it bypasses many online tests like
# https://bot.sannysoft.com/
# https://kaliiiiiiiiii.github.io/brotector/
# https://pixelscan.net/
# https://iphey.com/
# https://www.browserscan.net/bot-detection</s>
===========below chunk 1===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 2
<s>scan.net/
# https://iphey.com/
# https://www.browserscan.net/bot-detection <== this one also checks for the CDP runtime fingerprint
# https://arh.antoinevastel.com/bots/areyouheadless/
# https://prescience-data.github.io/execution-monitor.html
page.add_init_script(path=js_bypass_path('webdriver_fully.js'))
page.add_init_script(path=js_bypass_path('window_chrome.js'))
page.add_init_script(path=js_bypass_path('navigator_plugins.js'))
page.add_init_script(path=js_bypass_path('pdf_viewer.js'))
page.add_init_script(path=js_bypass_path('notification_permission.js'))
page.add_init_script(path=js_bypass_path('screen_props.js'))
page.add_init_script(path=js_bypass_path('playwright_fingerprint.js'))
res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
page.wait_for_load_state(state="domcontentloaded")
if self.network_idle:
page.wait_for_load_state('networkidle')
page = self.page_action(page)
if self.wait_selector and type(self.wait_selector) is str:
waiter = page.locator(self.wait_selector)
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' #</s>
===========below chunk 2===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 3
<s>
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=page.content(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.constants
DEFAULT_STEALTH_FLAGS = [
# Explanation: https://peter.sh/experiments/chromium-command-line-switches/
# Generally this will make the browser faster and less detectable
'--no-pings',
'--incognito',
'--test-type',
'--lang=en-US',
'--mute-audio',
'--no-first-run',
'--disable-sync',
'--hide-scrollbars',
'--disable-logging',
'--start-maximized', # For headless check bypass
'--enable-async-dns',
'--disable-breakpad',
'--disable-infobars',
'--accept-lang=en-US',
'--use-mock-keychain',
'--disable-translate',
'--disable-extensions',
'--disable-voice-input',
'--window-position=0,0',
'--disable-wake-on-wifi',
'--ignore-gpu-blocklist',
'--enable-tcp-fast-open',
'--enable-web-bluetooth',
'--disable-hang-monitor',
'--password-store=basic',
'--disable-cloud-import',
'--disable-default-apps',
'--disable-print-preview',
'--disable-dev-shm-usage',
'--disable-popup-blocking',
'--metrics-recording-only',
'--disable-crash-reporter',
'--disable-partial-raster',
'--disable-gesture-typing',
'--disable-checker-imaging',
'--disable-prompt-on-repost',
'--force-color-profile=srgb',
'--font-render-hinting=none',
'--no-default-browser-check',
'--aggressive-cache-discard',
'--disable-component-update',
'--disable-cookie-encryption',
'--disable-domain-reliability',
'--disable-threaded-animation',
'--disable-threaded-scrolling',
# '--disable-reading-from-canvas',</s>
|
|
scrapling.engines.camo/CamoufoxEngine.__init__
|
Modified
|
D4Vinci~Scrapling
|
2abb70218f46e9889cafb13cf6d0150aa8711793
|
Adding the proxy support to browser-based Fetchers
|
<s>List[str]] = None,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None,
- wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, adaptor_arguments: Dict = None
+ proxy: Optional[Union[str, Dict[str, str]]] = None, adaptor_arguments: Dict = None
):
<0> """An engine that utilizes Camoufox library, check the `StealthyFetcher` class for more documentation.
<1>
<2> :param headless: Run the browser in headless/hidden (default), virtual screen mode, or headful/visible mode.
<3> :param block_images: Prevent the loading of images through Firefox preferences.
<4> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<5> :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<6> Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
<7> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<8> :param block_webrtc: Blocks WebRTC entirely.
<9> :param addons: List of Firefox addons to use. Must be paths to extracted addons.
<10> :param humanize: Humanize the cursor movement. Takes either True or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
<11> :param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
<12> :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
<13> :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000
<14> :param page_action:</s>
|
===========below chunk 0===========
<s> None,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None,
- wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, adaptor_arguments: Dict = None
+ proxy: Optional[Union[str, Dict[str, str]]] = None, adaptor_arguments: Dict = None
):
# offset: 1
:param wait_selector: Wait for a specific css selector to be in a specific state.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
:param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
:param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
:param adaptor_arguments: The arguments that will be passed in the end while creating the final Adaptor's class.
"""
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.google_search = bool(google_search)
self.extra_headers = extra_headers or {}
self.addons = addons or []
self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
if callable(page_action):
self.page_action = page_action
else:
self.page_action = do_nothing
logging.error('[Ignored] Argument "page_action" must be callable')
self.wait_selector = wait_selector
self.</s>
===========below chunk 1===========
<s> None,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None,
- wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, adaptor_arguments: Dict = None
+ proxy: Optional[Union[str, Dict[str, str]]] = None, adaptor_arguments: Dict = None
):
# offset: 2
<s>('[Ignored] Argument "page_action" must be callable')
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
===========unchanged ref 0===========
at: logging
error(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: scrapling.engines.toolbelt.custom
check_type_validity(variable: Any, valid_types: Union[List[Type], None], default_value: Any=None, critical: bool=False, param_name: Optional[str]=None) -> Any
do_nothing(page)
at: typing
_SpecialForm(*args, **kwds)
Callable = _CallableType(collections.abc.Callable, 2)
List = _alias(list, 1, inst=False, name='List')
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
<s>
hide_canvas: bool = True,
disable_webgl: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False,
nstbrowser_config: Optional[Dict] = None,
google_search: Optional[bool] = True,
extra_headers: Optional[Dict[str, str]] = None,
+ proxy: Optional[Union[str, Dict[str, str]]] = None,
adaptor_arguments: Dict = None
):
"""An engine that utilizes PlayWright library, check the `PlayWrightFetcher` class for more documentation.
:param headless: Run the browser in headless/hidden (default), or headful/visible mode.
:param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
:param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
:param network_idle: Wait for the page until there are no network connections for at least 500 ms.
:param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000
:param page_action: Added for automation. A function that takes the `page` object, does the automation you need, then returns `page` again.
:param wait_selector: Wait for a specific css selector to be in a specific state.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
:param stealth: Enables stealth mode, check the documentation to see what stealth mode does currently.
:param hide_canvas: Add random noise to canvas operations to prevent fingerprinting.
:param disable_webgl: Disables WebGL and WebGL</s>
===========changed ref 1===========
<s>: bool = True,
disable_webgl: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False,
nstbrowser_config: Optional[Dict] = None,
google_search: Optional[bool] = True,
extra_headers: Optional[Dict[str, str]] = None,
+ proxy: Optional[Union[str, Dict[str, str]]] = None,
adaptor_arguments: Dict = None
):
# offset: 1
<s>_canvas: Add random noise to canvas operations to prevent fingerprinting.
:param disable_webgl: Disables WebGL and WebGL 2.0 support entirely.
:param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers/NSTBrowser through CDP.
:param nstbrowser_mode: Enables NSTBrowser mode, it have to be used with `cdp_url` argument or it will get completely ignored.
:param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
:param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
+ :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
:param nstbrowser_config: The config you want to send with requests to the NSTBrowser. If left empty, Scrapling defaults to an optimized NSTBrowser's docker browserless config.
:param adaptor_arguments: The arguments that will be passed in the end while creating the final Adaptor's class.
"""
self.headless = headless
self.disable_resources = disable_resources
self.network_idle = bool(network_idle)
self.stealth = bool(stealth)</s>
|
|
scrapling.engines.camo/CamoufoxEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
2abb70218f46e9889cafb13cf6d0150aa8711793
|
Adding the proxy support to browser-based Fetchers
|
<13>:<add> proxy=self.proxy,
|
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<4> """
<5> with Camoufox(
<6> headless=self.headless,
<7> block_images=self.block_images, # Careful! it makes some websites doesn't finish loading at all like stackoverflow even in headful
<8> os=get_os_name(),
<9> block_webrtc=self.block_webrtc,
<10> allow_webgl=self.allow_webgl,
<11> addons=self.addons,
<12> humanize=self.humanize,
<13> i_know_what_im_doing=True, # To turn warnings off with user configurations
<14> ) as browser:
<15> page = browser.new_page()
<16> page.set_default_navigation_timeout(self.timeout)
<17> page.set_default_timeout(self.timeout)
<18> if self.disable_resources:
<19> page.route("**/*", intercept_route)
<20>
<21> if self.extra_headers:
<22> page.set_extra_http_headers(self.extra_headers)
<23>
<24> res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
<25> page.wait_for_load_state(state="domcontentloaded")
<26> if self.network_idle:
<27> page.wait_for_load_state('networkidle')
<28>
<29> page = self.page_action(page)
<30>
<31> if self.wait_selector and type(self.wait_selector) is str:
<32> waiter = page.locator(self.wait_selector)
<33> waiter.wait_</s>
|
===========below chunk 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=page.content(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.camo.CamoufoxEngine.__init__
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.google_search = bool(google_search)
self.extra_headers = extra_headers or {}
self.addons = addons or []
self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
self.page_action = do_nothing
self.page_action = page_action
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, adaptor_arguments: Dict, encoding: str='utf-8')
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url: str) -> str
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
get_os_name() -> Union[str, None]
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: scrapling.engines.toolbelt.navigation
intercept_route(route: Route) -> Union[Route, None]
===========changed ref 0===========
<s>List[str]] = None,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None,
- wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, adaptor_arguments: Dict = None
+ proxy: Optional[Union[str, Dict[str, str]]] = None, adaptor_arguments: Dict = None
):
"""An engine that utilizes Camoufox library, check the `StealthyFetcher` class for more documentation.
:param headless: Run the browser in headless/hidden (default), virtual screen mode, or headful/visible mode.
:param block_images: Prevent the loading of images through Firefox preferences.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
:param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
:param block_webrtc: Blocks WebRTC entirely.
:param addons: List of Firefox addons to use. Must be paths to extracted addons.
:param humanize: Humanize the cursor movement. Takes either True or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
:param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
:param network_idle: Wait for the page until there are no network connections for at least 500 ms.
:param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000
:param page_action: Added for automation. A function that takes the `page` object, does</s>
===========changed ref 1===========
<s> None,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None,
- wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, adaptor_arguments: Dict = None
+ proxy: Optional[Union[str, Dict[str, str]]] = None, adaptor_arguments: Dict = None
):
# offset: 1
<s> page. The default is 30000
:param page_action: Added for automation. A function that takes the `page` object, does the automation you need, then returns `page` again.
:param wait_selector: Wait for a specific css selector to be in a specific state.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
:param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
:param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
+ :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
:param adaptor_arguments: The arguments that will be passed in the end while creating the final Adaptor's class.
"""
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.google_search = bool(google_search)
self.extra_headers = extra_</s>
===========changed ref 2===========
<s> None,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None,
- wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, adaptor_arguments: Dict = None
+ proxy: Optional[Union[str, Dict[str, str]]] = None, adaptor_arguments: Dict = None
):
# offset: 2
<s> {}
+ self.proxy = construct_proxy_dict(proxy)
self.addons = addons or []
self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
if callable(page_action):
self.page_action = page_action
else:
self.page_action = do_nothing
logging.error('[Ignored] Argument "page_action" must be callable')
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
|
scrapling.fetchers/StealthyFetcher.fetch
|
Modified
|
D4Vinci~Scrapling
|
2abb70218f46e9889cafb13cf6d0150aa8711793
|
Adding the proxy support to browser-based Fetchers
|
<s>_selector: Optional[str] = None, humanize: Optional[Union[bool, float]] = True,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, proxy: Optional[Union[str, Dict[str, str]]] = None,
- wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None
) -> Response:
<0> """
<1> Opens up a browser and makes your request based on your chosen options below.
<2> :param url: Target url.
<3> :param headless: Run the browser in headless/hidden (default), 'virtual' screen mode, or headful/visible mode.
<4> :param block_images: Prevent the loading of images through Firefox preferences.
<5> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<6> :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<7> Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
<8> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<9> :param block_webrtc: Blocks WebRTC entirely.
<10> :param addons: List of Firefox addons to use. Must be paths to extracted addons.
<11> :param humanize: Humanize the cursor movement. Takes either True or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
<12> :param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
<13> :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
<14> :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000
<15> :param</s>
|
===========below chunk 0===========
<s>str] = None, humanize: Optional[Union[bool, float]] = True,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, proxy: Optional[Union[str, Dict[str, str]]] = None,
- wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None
) -> Response:
# offset: 1
:param wait_selector: Wait for a specific css selector to be in a specific state.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
:param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
:param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
engine = CamoufoxEngine(
timeout=timeout,
headless=headless,
page_action=page_action,
block_images=block_images,
block_webrtc=block_webrtc,
addons=addons,
humanize=humanize,
allow_webgl=allow_webgl,
disable_resources=disable_resources,
network_idle=network_idle,
wait_selector=wait_selector,
wait_selector_state=wait_selector_state,
google_search=google_search,
extra_headers=extra_headers,
adaptor_arguments=self.adaptor_arguments,
)
return engine.fetch(url)
===========unchanged ref 0===========
at: scrapling.engines.camo
CamoufoxEngine(headless: Optional[Union[bool, Literal['virtual']]]=True, block_images: Optional[bool]=False, disable_resources: Optional[bool]=False, block_webrtc: Optional[bool]=False, allow_webgl: Optional[bool]=False, network_idle: Optional[bool]=False, humanize: Optional[Union[bool, float]]=True, timeout: Optional[float]=30000, page_action: Callable=do_nothing, wait_selector: Optional[str]=None, addons: Optional[List[str]]=None, wait_selector_state: str='attached', google_search: Optional[bool]=True, extra_headers: Optional[Dict[str, str]]=None, adaptor_arguments: Dict=None)
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, adaptor_arguments: Dict, encoding: str='utf-8')
do_nothing(page)
at: scrapling.engines.toolbelt.custom.BaseFetcher.__init__
self.adaptor_arguments = dict(
huge_tree=huge_tree,
keep_comments=keep_comments,
auto_match=auto_match,
storage=storage,
storage_args=storage_args,
debug=debug,
)
at: typing
_SpecialForm(*args, **kwds)
Callable = _CallableType(collections.abc.Callable, 2)
List = _alias(list, 1, inst=False, name='List')
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
# module: scrapling.engines.toolbelt.navigation
+ def construct_proxy_dict(proxy_string: Union[str, Dict[str, str]]) -> Union[Dict, None]:
+ """Validate a proxy and return it in the acceptable format for Playwright
+ Reference: https://playwright.dev/python/docs/network#http-proxy
+
+ :param proxy_string: A string or a dictionary representation of the proxy.
+ :return:
+ """
+ if proxy_string:
+ if isinstance(proxy_string, str):
+ proxy = urlparse(proxy_string)
+ try:
+ return {
+ 'server': f'{proxy.scheme}://{proxy.hostname}:{proxy.port}',
+ 'username': proxy.username or '',
+ 'password': proxy.password or '',
+ }
+ except ValueError:
+ # Urllib will say that one of the parameters above can't be casted to the correct type like `int` for port etc...
+ raise TypeError(f'The proxy argument\'s string is in invalid format!')
+
+ elif isinstance(proxy_string, dict):
+ valid_keys = ('server', 'username', 'password', )
+ if all(key in valid_keys for key in proxy_string.keys()) and not any(key not in valid_keys for key in proxy_string.keys()):
+ return proxy_string
+ else:
+ raise TypeError(f'A proxy dictionary must have only these keys: {valid_keys}')
+
+ else:
+ raise TypeError(f'Invalid type of proxy ({type(proxy_string)}), the proxy argument must be a string or a dictionary!')
+
+ # The default value for proxy in Playwright's source is `None`
+ return None
+
===========changed ref 1===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
"""Opens up the browser and do your request based on your chosen options.
:param url: Target url.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
with Camoufox(
headless=self.headless,
block_images=self.block_images, # Careful! it makes some websites doesn't finish loading at all like stackoverflow even in headful
os=get_os_name(),
block_webrtc=self.block_webrtc,
allow_webgl=self.allow_webgl,
addons=self.addons,
humanize=self.humanize,
+ proxy=self.proxy,
i_know_what_im_doing=True, # To turn warnings off with user configurations
) as browser:
page = browser.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.disable_resources:
page.route("**/*", intercept_route)
if self.extra_headers:
page.set_extra_http_headers(self.extra_headers)
res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
page.wait_for_load_state(state="domcontentloaded")
if self.network_idle:
page.wait_for_load_state('networkidle')
page = self.page_action(page)
if self.wait_selector and type(self.wait_selector) is str:
waiter = page.locator(self.wait_selector)
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('</s>
|
|
scrapling.fetchers/PlayWrightFetcher.fetch
|
Modified
|
D4Vinci~Scrapling
|
2abb70218f46e9889cafb13cf6d0150aa8711793
|
Adding the proxy support to browser-based Fetchers
|
<s>str] = 'attached',
hide_canvas: bool = True, disable_webgl: bool = False, extra_headers: Optional[Dict[str, str]] = None, google_search: Optional[bool] = True,
+ proxy: Optional[Union[str, Dict[str, str]]] = None,
stealth: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False, nstbrowser_config: Optional[Dict] = None,
) -> Response:
<0> """Opens up a browser and do your request based on your chosen options below.
<1> :param url: Target url.
<2> :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
<3> :param disable_resources: Drop requests of unnecessary resources for speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<4> Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
<5> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<6> :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
<7> :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
<8> :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000
<9> :param page_action: Added for automation. A function that takes the `page` object, does the automation you need, then returns `page` again.
<10> :param wait_selector: Wait for a specific css selector to be in a specific state.
<11> :param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
<12> :param stealth: Enables stealth mode, check the documentation to see what stealth mode does currently.
<13> :param hide_canvas: Add random noise to canvas operations to prevent fingerprinting.
<14> </s>
|
===========below chunk 0===========
<s>',
hide_canvas: bool = True, disable_webgl: bool = False, extra_headers: Optional[Dict[str, str]] = None, google_search: Optional[bool] = True,
+ proxy: Optional[Union[str, Dict[str, str]]] = None,
stealth: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False, nstbrowser_config: Optional[Dict] = None,
) -> Response:
# offset: 1
:param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
:param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
:param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers/NSTBrowser through CDP.
:param nstbrowser_mode: Enables NSTBrowser mode, it have to be used with `cdp_url` argument or it will get completely ignored.
:param nstbrowser_config: The config you want to send with requests to the NSTBrowser. If left empty, Scrapling defaults to an optimized NSTBrowser's docker browserless config.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
engine = PlaywrightEngine(
timeout=timeout,
stealth=stealth,
cdp_url=cdp_url,
headless=headless,
useragent=useragent,
page_action=page_action,
hide_canvas=hide_canvas,
network_idle=network_idle,
google_search=google_search,
extra_headers=extra_headers,
wait_selector=wait_selector,
disable_webgl=disable_webgl,
nstbrowser_mode=</s>
===========below chunk 1===========
<s>',
hide_canvas: bool = True, disable_webgl: bool = False, extra_headers: Optional[Dict[str, str]] = None, google_search: Optional[bool] = True,
+ proxy: Optional[Union[str, Dict[str, str]]] = None,
stealth: bool = False,
cdp_url: Optional[str] = None,
nstbrowser_mode: bool = False, nstbrowser_config: Optional[Dict] = None,
) -> Response:
# offset: 2
<s>
wait_selector=wait_selector,
disable_webgl=disable_webgl,
nstbrowser_mode=nstbrowser_mode,
nstbrowser_config=nstbrowser_config,
disable_resources=disable_resources,
wait_selector_state=wait_selector_state,
adaptor_arguments=self.adaptor_arguments,
)
return engine.fetch(url)
===========unchanged ref 0===========
at: scrapling.engines.pw
PlaywrightEngine(headless: Union[bool, str]=True, disable_resources: bool=False, useragent: Optional[str]=None, network_idle: Optional[bool]=False, timeout: Optional[float]=30000, page_action: Callable=do_nothing, wait_selector: Optional[str]=None, wait_selector_state: Optional[str]='attached', stealth: bool=False, hide_canvas: bool=True, disable_webgl: bool=False, cdp_url: Optional[str]=None, nstbrowser_mode: bool=False, nstbrowser_config: Optional[Dict]=None, google_search: Optional[bool]=True, extra_headers: Optional[Dict[str, str]]=None, adaptor_arguments: Dict=None)
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, adaptor_arguments: Dict, encoding: str='utf-8')
do_nothing(page)
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
<s>_selector: Optional[str] = None, humanize: Optional[Union[bool, float]] = True,
+ wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, proxy: Optional[Union[str, Dict[str, str]]] = None,
- wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None
) -> Response:
"""
Opens up a browser and do your request based on your chosen options below.
:param url: Target url.
:param headless: Run the browser in headless/hidden (default), 'virtual' screen mode, or headful/visible mode.
:param block_images: Prevent the loading of images through Firefox preferences.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
:param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
:param block_webrtc: Blocks WebRTC entirely.
:param addons: List of Firefox addons to use. Must be paths to extracted addons.
:param humanize: Humanize the cursor movement. Takes either True or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
:param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
:param network_idle: Wait for the page until there are no network connections for at least 500 ms.
:param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000
:param page_action: Added for automation. A function that takes the `page`</s>
|
|
scrapling.engines.camo/CamoufoxEngine.__init__
|
Modified
|
D4Vinci~Scrapling
|
5e8275c3eb5c18e4688d093f60918e610b484b1a
|
Adding the option to randomize the OS fingerprints with the StealthyFetcher
|
<13>:<add> :param os_randomize: If enabled, Scrapling will randomize the OS fingerprints used. The default is Scrapling matching the fingerprints with the current OS.
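The ground truth above documents the new `os_randomize` flag on the engine. Assuming the user-facing `StealthyFetcher.fetch` forwards it like the other engine arguments, usage would look roughly like this (URL and proxy are placeholders):

from scrapling import StealthyFetcher

fetcher = StealthyFetcher()
page = fetcher.fetch(
    "https://example.com",
    os_randomize=True,                        # pick a random OS fingerprint instead of matching the host OS
    proxy="http://user:pass@127.0.0.1:8080",  # string form; a {'server', 'username', 'password'} dict also works
)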
|
<s>] = None, addons: Optional[List[str]] = None,
wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None,
+ proxy: Optional[Union[str, Dict[str, str]]] = None, os_randomize: Optional[bool] = None, adaptor_arguments: Dict = None
- proxy: Optional[Union[str, Dict[str, str]]] = None, adaptor_arguments: Dict = None
):
<0> """An engine that utilizes Camoufox library, check the `StealthyFetcher` class for more documentation.
<1>
<2> :param headless: Run the browser in headless/hidden (default), virtual screen mode, or headful/visible mode.
<3> :param block_images: Prevent the loading of images through Firefox preferences.
<4> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<5> :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<6> Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
<7> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<8> :param block_webrtc: Blocks WebRTC entirely.
<9> :param addons: List of Firefox addons to use. Must be paths to extracted addons.
<10> :param humanize: Humanize the cursor movement. Takes either True or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
<11> :param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
<12> :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
<13> :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000
<14> :param page_action:</s>
|
===========below chunk 0===========
<s>: Optional[List[str]] = None,
wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None,
+ proxy: Optional[Union[str, Dict[str, str]]] = None, os_randomize: Optional[bool] = None, adaptor_arguments: Dict = None
- proxy: Optional[Union[str, Dict[str, str]]] = None, adaptor_arguments: Dict = None
):
# offset: 1
:param wait_selector: Wait for a specific css selector to be in a specific state.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
:param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
:param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
:param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
:param adaptor_arguments: The arguments that will be passed in the end while creating the final Adaptor's class.
"""
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.google_search = bool(google_search)
self.extra_headers = extra_headers or {}
self.proxy = construct_proxy_dict(proxy)
self.addons = addons or []
self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
if callable(page_action):
self.page_action = page_</s>
===========below chunk 1===========
<s>: Optional[List[str]] = None,
wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None,
+ proxy: Optional[Union[str, Dict[str, str]]] = None, os_randomize: Optional[bool] = None, adaptor_arguments: Dict = None
- proxy: Optional[Union[str, Dict[str, str]]] = None, adaptor_arguments: Dict = None
):
# offset: 2
<s>(timeout, [int, float], 30000)
if callable(page_action):
self.page_action = page_action
else:
self.page_action = do_nothing
logging.error('[Ignored] Argument "page_action" must be callable')
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
self.adaptor_arguments = adaptor_arguments if adaptor_arguments else {}
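A short usage sketch of the two `proxy` formats accepted above (a plain string, or a dictionary limited to 'server', 'username', and 'password'); the addresses are placeholders and the fetcher classes normally build this engine for you:

    engine = CamoufoxEngine(
        headless=True,
        proxy='http://user:pass@127.0.0.1:8080',  # string form, parsed by construct_proxy_dict
        adaptor_arguments={'keep_comments': False},
    )
    # or the dictionary form with the three allowed keys only
    engine = CamoufoxEngine(
        headless=True,
        proxy={'server': 'http://127.0.0.1:8080', 'username': 'user', 'password': 'pass'},
    )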
===========unchanged ref 0===========
at: logging
error(msg: Any, *args: Any, exc_info: _ExcInfoType=..., stack_info: bool=..., extra: Optional[Dict[str, Any]]=..., **kwargs: Any) -> None
at: scrapling.engines.toolbelt.custom
check_type_validity(variable: Any, valid_types: Union[List[Type], None], default_value: Any=None, critical: bool=False, param_name: Optional[str]=None) -> Any
do_nothing(page)
at: scrapling.engines.toolbelt.navigation
construct_proxy_dict(proxy_string: Union[str, Dict[str, str]]) -> Union[Dict, None]
at: typing
_SpecialForm(*args, **kwds)
Callable = _CallableType(collections.abc.Callable, 2)
List = _alias(list, 1, inst=False, name='List')
Dict = _alias(dict, 2, inst=False, name='Dict')
|
scrapling.engines.camo/CamoufoxEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
5e8275c3eb5c18e4688d093f60918e610b484b1a
|
Adding the option to randomize the OS fingerprints with the StealthyFetcher
|
<6>:<add> proxy=self.proxy,
<add> addons=self.addons,
<7>:<add> humanize=self.humanize,
<add> i_know_what_im_doing=True, # To turn warnings off with the user configurations
<add> allow_webgl=self.allow_webgl,
<add> block_webrtc=self.block_webrtc,
<8>:<del> os=get_os_name(),
<9>:<del> block_webrtc=self.block_webrtc,
<10>:<del> allow_webgl=self.allow_webgl,
<11>:<del> addons=self.addons,
<12>:<del> humanize=self.humanize,
<13>:<del> proxy=self.proxy,
<14>:<del> i_know_what_im_doing=True, # To turn warnings off with user configurations
<15>:<add> os=None if self.os_randomize else get_os_name(),
|
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
<4> """
<5> with Camoufox(
<6> headless=self.headless,
<7> block_images=self.block_images, # Careful! it makes some websites never finish loading at all, like stackoverflow, even in headful
<8> os=get_os_name(),
<9> block_webrtc=self.block_webrtc,
<10> allow_webgl=self.allow_webgl,
<11> addons=self.addons,
<12> humanize=self.humanize,
<13> proxy=self.proxy,
<14> i_know_what_im_doing=True, # To turn warnings off with user configurations
<15> ) as browser:
<16> page = browser.new_page()
<17> page.set_default_navigation_timeout(self.timeout)
<18> page.set_default_timeout(self.timeout)
<19> if self.disable_resources:
<20> page.route("**/*", intercept_route)
<21>
<22> if self.extra_headers:
<23> page.set_extra_http_headers(self.extra_headers)
<24>
<25> res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
<26> page.wait_for_load_state(state="domcontentloaded")
<27> if self.network_idle:
<28> page.wait_for_load_state('networkidle')
<29>
<30> page = self.page_action(page)
<31>
<32> if self.wait_selector and type(self.wait_selector) is str:
<33> waiter = page.locator(self.wait_</s>
|
===========below chunk 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=page.content(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
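The charset handling above is plain string slicing of the Content-Type header; the same logic as a standalone sketch:

    def parse_encoding(content_type, default='utf-8'):
        # 'text/html; charset=ISO-8859-1' -> 'iso-8859-1'
        if 'charset=' in content_type.lower():
            return content_type.lower().split('charset=')[-1].split(';')[0].strip()
        return default

    assert parse_encoding('text/html; charset=UTF-8') == 'utf-8'
    assert parse_encoding('text/html') == 'utf-8'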
===========unchanged ref 0===========
at: scrapling.engines.camo.CamoufoxEngine.__init__
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.google_search = bool(google_search)
self.extra_headers = extra_headers or {}
self.proxy = construct_proxy_dict(proxy)
self.addons = addons or []
self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
self.page_action = do_nothing
self.page_action = page_action
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, adaptor_arguments: Dict, encoding: str='utf-8')
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url: str) -> str
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
get_os_name() -> Union[str, None]
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: scrapling.engines.toolbelt.navigation
intercept_route(route: Route) -> Union[Route, None]
===========changed ref 0===========
<s>] = None, addons: Optional[List[str]] = None,
wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None,
+ proxy: Optional[Union[str, Dict[str, str]]] = None, os_randomize: Optional[bool] = None, adaptor_arguments: Dict = None
- proxy: Optional[Union[str, Dict[str, str]]] = None, adaptor_arguments: Dict = None
):
"""An engine that utilizes Camoufox library, check the `StealthyFetcher` class for more documentation.
:param headless: Run the browser in headless/hidden (default), virtual screen mode, or headful/visible mode.
:param block_images: Prevent the loading of images through Firefox preferences.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
:param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
:param block_webrtc: Blocks WebRTC entirely.
:param addons: List of Firefox addons to use. Must be paths to extracted addons.
:param humanize: Humanize the cursor movement. Takes either True or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
:param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
:param network_idle: Wait for the page until there are no network connections for at least 500 ms.
+ :param os_randomize: If enabled, Scrapling will randomize the OS fingerprints used. The default is Scrapling matching the fingerprints with the current OS.
:param timeout: The timeout</s>
===========changed ref 1===========
<s>: Optional[List[str]] = None,
wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None,
+ proxy: Optional[Union[str, Dict[str, str]]] = None, os_randomize: Optional[bool] = None, adaptor_arguments: Dict = None
- proxy: Optional[Union[str, Dict[str, str]]] = None, adaptor_arguments: Dict = None
):
# offset: 1
<s> the OS fingerprints used. The default is Scrapling matching the fingerprints with the current OS.
:param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000
:param page_action: Added for automation. A function that takes the `page` object, does the automation you need, then returns `page` again.
:param wait_selector: Wait for a specific css selector to be in a specific state.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
:param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
:param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
:param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
:param adaptor_arguments: The arguments that will be passed in the end while creating the final Adaptor's class.
"""
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_</s>
|
scrapling.fetchers/StealthyFetcher.fetch
|
Modified
|
D4Vinci~Scrapling
|
5e8275c3eb5c18e4688d093f60918e610b484b1a
|
Adding the option to randomize the OS fingerprints with the StealthyFetcher
|
<14>:<add> :param os_randomize: If enabled, Scrapling will randomize the OS fingerprints used. The default is Scrapling matching the fingerprints with the current OS.
|
<s> = None,
timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, humanize: Optional[Union[bool, float]] = True,
wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, proxy: Optional[Union[str, Dict[str, str]]] = None,
+ os_randomize: Optional[bool] = None
) -> Response:
<0> """
<1> Opens up a browser and does your request based on your chosen options below.
<2> :param url: Target url.
<3> :param headless: Run the browser in headless/hidden (default), 'virtual' screen mode, or headful/visible mode.
<4> :param block_images: Prevent the loading of images through Firefox preferences.
<5> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<6> :param disable_resources: Drop requests of unnecessary resources for a speed boost. It depends but it made requests ~25% faster in my tests for some websites.
<7> Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
<8> This can help save your proxy usage but be careful with this option as it makes some websites never finish loading.
<9> :param block_webrtc: Blocks WebRTC entirely.
<10> :param addons: List of Firefox addons to use. Must be paths to extracted addons.
<11> :param humanize: Humanize the cursor movement. Takes either True or the MAX duration in seconds of the cursor movement. The cursor typically takes up to 1.5 seconds to move across the window.
<12> :param allow_webgl: Whether to allow WebGL. To prevent leaks, only use this for special cases.
<13> :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
<14> :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30000
<15> :param</s>
|
===========below chunk 0===========
<s> timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, humanize: Optional[Union[bool, float]] = True,
wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, proxy: Optional[Union[str, Dict[str, str]]] = None,
+ os_randomize: Optional[bool] = None
) -> Response:
# offset: 1
:param wait_selector: Wait for a specific css selector to be in a specific state.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. Default state is `attached`.
:param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search for this website's domain name.
:param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
:param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
engine = CamoufoxEngine(
proxy=proxy,
addons=addons,
timeout=timeout,
headless=headless,
humanize=humanize,
allow_webgl=allow_webgl,
page_action=page_action,
network_idle=network_idle,
block_images=block_images,
block_webrtc=block_webrtc,
wait_selector=wait_selector,
google_search=google_search,
extra_headers=extra_headers,
disable_resources=disable_resources,
wait_selector_state=wait_selector_state,
adaptor_arguments=self</s>
===========below chunk 1===========
<s> timeout: Optional[float] = 30000, page_action: Callable = do_nothing, wait_selector: Optional[str] = None, humanize: Optional[Union[bool, float]] = True,
wait_selector_state: str = 'attached', google_search: Optional[bool] = True, extra_headers: Optional[Dict[str, str]] = None, proxy: Optional[Union[str, Dict[str, str]]] = None,
+ os_randomize: Optional[bool] = None
) -> Response:
# offset: 2
<s> disable_resources=disable_resources,
wait_selector_state=wait_selector_state,
adaptor_arguments=self.adaptor_arguments,
)
return engine.fetch(url)
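A hedged usage sketch of the new `os_randomize` flag wired through above; it assumes `StealthyFetcher` is importable from the package root and can be constructed with its defaults:

    from scrapling import StealthyFetcher  # import path assumed

    page = StealthyFetcher().fetch(
        'https://example.com',
        os_randomize=True,   # use random OS fingerprints instead of matching the host OS
        network_idle=True,
    )
    print(page.status, page.url)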
===========unchanged ref 0===========
at: scrapling.engines.camo
CamoufoxEngine(headless: Optional[Union[bool, Literal['virtual']]]=True, block_images: Optional[bool]=False, disable_resources: Optional[bool]=False, block_webrtc: Optional[bool]=False, allow_webgl: Optional[bool]=False, network_idle: Optional[bool]=False, humanize: Optional[Union[bool, float]]=True, timeout: Optional[float]=30000, page_action: Callable=do_nothing, wait_selector: Optional[str]=None, addons: Optional[List[str]]=None, wait_selector_state: str='attached', google_search: Optional[bool]=True, extra_headers: Optional[Dict[str, str]]=None, proxy: Optional[Union[str, Dict[str, str]]]=None, adaptor_arguments: Dict=None)
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, adaptor_arguments: Dict, encoding: str='utf-8')
do_nothing(page)
at: typing
_SpecialForm(*args, **kwds)
Callable = _CallableType(collections.abc.Callable, 2)
List = _alias(list, 1, inst=False, name='List')
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
"""Opens up the browser and do your request based on your chosen options.
:param url: Target url.
:return: A Response object with `url`, `text`, `content`, `status`, `reason`, `encoding`, `cookies`, `headers`, `request_headers`, and the `adaptor` class for parsing, of course.
"""
with Camoufox(
+ proxy=self.proxy,
+ addons=self.addons,
headless=self.headless,
+ humanize=self.humanize,
+ i_know_what_im_doing=True, # To turn warnings off with the user configurations
+ allow_webgl=self.allow_webgl,
+ block_webrtc=self.block_webrtc,
block_images=self.block_images, # Careful! it makes some websites never finish loading at all, like stackoverflow, even in headful
- os=get_os_name(),
- block_webrtc=self.block_webrtc,
- allow_webgl=self.allow_webgl,
- addons=self.addons,
- humanize=self.humanize,
- proxy=self.proxy,
- i_know_what_im_doing=True, # To turn warnings off with user configurations
+ os=None if self.os_randomize else get_os_name(),
) as browser:
page = browser.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.disable_resources:
page.route("**/*", intercept_route)
if self.extra_headers:
page.set_extra_http_headers(self.extra_headers)
res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
page.wait_for</s>
===========changed ref 1===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
<s>url, referer=generate_convincing_referer(url) if self.google_search else None)
page.wait_for_load_state(state="domcontentloaded")
if self.network_idle:
page.wait_for_load_state('networkidle')
page = self.page_action(page)
if self.wait_selector and type(self.wait_selector) is str:
waiter = page.locator(self.wait_selector)
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=page.content(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
|
scrapling.parser/Adaptor.__init__
|
Modified
|
D4Vinci~Scrapling
|
572df6b3b48955ffac484e4fa3691a9b41838f8e
|
Fixing the way `Response` object handles sub items in some edge cases
|
<s> None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
+ **kwargs
):
<0> """The main class that works as a wrapper for the HTML input data. Using this class, you can search for elements
<1> with expressions in CSS, XPath, or with simply text. Check the docs for more info.
<2>
<3> Here we try to extend module ``lxml.html.HtmlElement`` while maintaining a simpler interface, We are not
<4> inheriting from the ``lxml.html.HtmlElement`` because it's not pickleable which makes a lot of reference jobs
<5> not possible. You can test it here and see code explodes with `AssertionError: invalid Element proxy at...`.
<6> It's an old issue with lxml, see `this entry <https://bugs.launchpad.net/lxml/+bug/736708>`
<7>
<8> :param text: HTML body passed as text.
<9> :param url: allows storing a URL with the html data for retrieving later.
<10> :param body: HTML body as ``bytes`` object. It can be used instead of the ``text`` argument.
<11> :param encoding: The encoding type that will be used in HTML parsing, default is `UTF-8`
<12> :param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls
<13> libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion.
<14> :param root: Used internally to pass etree objects instead of text/body arguments, it takes highest priority.
<15> Don't use it unless you know what you are doing!
<16> :param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons
<17> :param auto_match: Glob</s>
|
===========below chunk 0===========
<s>: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
+ **kwargs
):
# offset: 1
priority over all auto-match related arguments/functions in the class.
:param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info.
:param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class.
If empty, default values will be used.
:param debug: Enable debug mode
"""
if root is None and not body and text is None:
raise ValueError("Adaptor class needs text, body, or root arguments to work")
self.__text = None
if root is None:
if text is None:
if not body or not isinstance(body, bytes):
raise TypeError(f"body argument must be valid and of type bytes, got {body.__class__}")
body = body.replace(b"\x00", b"").strip()
else:
if not isinstance(text, str):
raise TypeError(f"text argument must be of type str, got {text.__class__}")
body = text.strip().replace("\x00", "").encode(encoding) or b"<html/>"
# https://lxml.de/api/lxml.etree.HTMLParser-class.html
parser = html.HTMLParser(
recover=True, remove_blank_text=True, remove_comments=(keep_comments is False), encoding=encoding,
compact=True, huge_tree=huge_tree, default_doctype=True
)
self._root = etree.fromstring(body, parser=parser, base_url=url)
if is_jsonable(text or</s>
===========below chunk 1===========
<s>: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
+ **kwargs
):
# offset: 2
<s>._root = etree.fromstring(body, parser=parser, base_url=url)
if is_jsonable(text or body.decode()):
self.__text = TextHandler(text or body.decode())
else:
# All html types inherits from HtmlMixin so this to check for all at once
if not issubclass(type(root), html.HtmlMixin):
raise TypeError(
f"Root have to be a valid element of `html` module types to work, not of type {type(root)}"
)
self._root = root
setup_basic_logging(level='debug' if debug else 'info')
self.__auto_match_enabled = auto_match
if self.__auto_match_enabled:
if not storage_args:
storage_args = {
'storage_file': os.path.join(os.path.dirname(__file__), 'elements_storage.db'),
'url': url
}
if not hasattr(storage, '__wrapped__'):
raise ValueError("Storage class must be wrapped with cache decorator, see docs for info")
if not issubclass(storage.__wrapped__, StorageSystemMixin):
raise ValueError("Storage system must be inherited from class `StorageSystemMixin`")
self._storage = storage(**storage_args)
self.__keep_comments = keep_comments
self.__huge_tree_enabled = huge_tree
self.encoding = encoding
self.url = url
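A small sketch of the two equivalent entry points described in the docstring above (`text` as str or `body` as bytes); auto_match is turned off here only to skip the storage setup:

    html_doc = '<html><body><p>Hello</p></body></html>'

    a1 = Adaptor(text=html_doc, url='https://example.com', auto_match=False)
    a2 = Adaptor(body=html_doc.encode('utf8'), url='https://example.com', auto_match=False)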
===========unchanged ref 0===========
at: functools._lru_cache_wrapper
__wrapped__: Callable[..., _T]
at: os.path
join(a: StrPath, *paths: StrPath) -> str
join(a: BytesPath, *paths: BytesPath) -> bytes
dirname(p: _PathLike[AnyStr]) -> AnyStr
dirname(p: AnyStr) -> AnyStr
at: scrapling.core.custom_types
TextHandler(o: object=...)
TextHandler(o: bytes, encoding: str=..., errors: str=...)
at: scrapling.core.storage_adaptors
StorageSystemMixin(url: Union[str, None]=None)
SQLiteStorageSystem(storage_file: str, url: Union[str, None]=None)
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: scrapling.core.utils
is_jsonable(content: Union[bytes, str]) -> bool
setup_basic_logging(level: str='debug')
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: scrapling.parser.Adaptor.attrib
self.__attributes = AttributesHandler(self._root.attrib)
at: scrapling.parser.Adaptor.tag
self.__tag = self._root.tag
at: scrapling.parser.Adaptor.text
self.__text = TextHandler(self._root.text)
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
|
|
scrapling.parser/Adaptor.__get_correct_result
|
Modified
|
D4Vinci~Scrapling
|
572df6b3b48955ffac484e4fa3691a9b41838f8e
|
Fixing the way `Response` object handles sub items in some edge cases
|
<6>:<add>
<7>:<add> root=element,
<add> text='', body=b'', # Since root argument is provided, both `text` and `body` will be ignored so this is just a filler
<add> url=self.url, encoding=self.encoding, auto_match=self.__auto_match_enabled,
<del> root=element, url=self.url, encoding=self.encoding, auto_match=self.__auto_match_enabled,
<9>:<add> huge_tree=self.__huge_tree_enabled, debug=self.__debug,
<del> huge_tree=self.__huge_tree_enabled, debug=self.__debug
<10>:<add> **self.__response_data
|
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
def __get_correct_result(
self, element: Union[html.HtmlElement, etree._ElementUnicodeResult]
) -> Union[TextHandler, html.HtmlElement, 'Adaptor', str]:
<0> """Used internally in all functions to convert results to type (Adaptor|Adaptors) when possible"""
<1> if self._is_text_node(element):
<2> # etree._ElementUnicodeResult basically inherit from `str` so it's fine
<3> return TextHandler(str(element))
<4> else:
<5> if issubclass(type(element), html.HtmlMixin):
<6> return self.__class__(
<7> root=element, url=self.url, encoding=self.encoding, auto_match=self.__auto_match_enabled,
<8> keep_comments=True, # if the comments are already removed in initialization, no need to try to delete them in sub-elements
<9> huge_tree=self.__huge_tree_enabled, debug=self.__debug
<10> )
<11> return element
<12>
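The practical effect of threading `__response_data` through this call is that elements selected from a `Response` keep the response metadata; a hypothetical illustration (the `css` selection method and attribute access are assumptions):

    # page is a Response returned by one of the fetchers
    first_link = page.css('a')[0]
    assert first_link.status == page.status
    assert first_link.headers == page.headers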
|
===========unchanged ref 0===========
at: scrapling.core.custom_types
TextHandler(o: object=...)
TextHandler(o: bytes, encoding: str=..., errors: str=...)
at: scrapling.parser.Adaptor
__slots__ = (
'url', 'encoding', '__auto_match_enabled', '_root', '_storage', '__debug',
'__keep_comments', '__huge_tree_enabled', '__attributes', '__text', '__tag',
)
_is_text_node(element: Union[html.HtmlElement, etree._ElementUnicodeResult]) -> bool
body = html_content
at: scrapling.parser.Adaptor.__init__
self.__auto_match_enabled = auto_match
self.__huge_tree_enabled = huge_tree
self.encoding = encoding
self.url = url
self.__debug = debug
===========changed ref 0===========
<s> None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
+ **kwargs
):
"""The main class that works as a wrapper for the HTML input data. Using this class, you can search for elements
with expressions in CSS, XPath, or with simply text. Check the docs for more info.
Here we try to extend module ``lxml.html.HtmlElement`` while maintaining a simpler interface, We are not
inheriting from the ``lxml.html.HtmlElement`` because it's not pickleable which makes a lot of reference jobs
not possible. You can test it here and see code explodes with `AssertionError: invalid Element proxy at...`.
It's an old issue with lxml, see `this entry <https://bugs.launchpad.net/lxml/+bug/736708>`
:param text: HTML body passed as text.
:param url: allows storing a URL with the html data for retrieving later.
:param body: HTML body as ``bytes`` object. It can be used instead of the ``text`` argument.
:param encoding: The encoding type that will be used in HTML parsing, default is `UTF-8`
:param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls
libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion.
:param root: Used internally to pass etree objects instead of text/body arguments, it takes highest priority.
Don't use it unless you know what you are doing!
:param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons
:param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
</s>
===========changed ref 1===========
<s>: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
+ **kwargs
):
# offset: 1
<s> for obvious reasons
:param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
priority over all auto-match related arguments/functions in the class.
:param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info.
:param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class.
If empty, default values will be used.
:param debug: Enable debug mode
"""
if root is None and not body and text is None:
raise ValueError("Adaptor class needs text, body, or root arguments to work")
self.__text = None
if root is None:
if text is None:
if not body or not isinstance(body, bytes):
raise TypeError(f"body argument must be valid and of type bytes, got {body.__class__}")
body = body.replace(b"\x00", b"").strip()
else:
if not isinstance(text, str):
raise TypeError(f"text argument must be of type str, got {text.__class__}")
body = text.strip().replace("\x00", "").encode(encoding) or b"<html/>"
# https://lxml.de/api/lxml.etree.HTMLParser-class.html
parser = html.HTMLParser(
recover=True, remove_blank_text=True, remove_comments=(keep_comments is False</s>
===========changed ref 2===========
<s>: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
+ **kwargs
):
# offset: 2
<s>=encoding,
compact=True, huge_tree=huge_tree, default_doctype=True
)
self._root = etree.fromstring(body, parser=parser, base_url=url)
if is_jsonable(text or body.decode()):
self.__text = TextHandler(text or body.decode())
else:
# All html types inherits from HtmlMixin so this to check for all at once
if not issubclass(type(root), html.HtmlMixin):
raise TypeError(
f"Root have to be a valid element of `html` module types to work, not of type {type(root)}"
)
self._root = root
setup_basic_logging(level='debug' if debug else 'info')
self.__auto_match_enabled = auto_match
if self.__auto_match_enabled:
if not storage_args:
storage_args = {
'storage_file': os.path.join(os.path.dirname(__file__), 'elements_storage.db'),
'url': url
}
if not hasattr(storage, '__wrapped__'):
raise ValueError("Storage class must be wrapped with cache decorator, see docs for info")
if not issubclass(storage.__wrapped__, StorageSystemMixin):
raise ValueError("Storage system must be inherited from class `StorageSystemMixin`")
self._storage = storage(**storage_args)
self.__keep_comments =</s>
===========changed ref 3===========
<s>: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
+ **kwargs
):
# offset: 3
<s>comments
self.__huge_tree_enabled = huge_tree
self.encoding = encoding
self.url = url
# For selector stuff
self.__attributes = None
self.__tag = None
self.__debug = debug
+ # No need to check if all response attributes exist or not because if `status` exist, then the rest exist (Save some CPU cycles for speed)
+ self.__response_data = {
+ key: getattr(self, key) for key in ('status', 'reason', 'cookies', 'headers', 'request_headers',)
+ } if hasattr(self, 'status') else {}
|
scrapling.engines.toolbelt.custom/Response.__init__
|
Modified
|
D4Vinci~Scrapling
|
572df6b3b48955ffac484e4fa3691a9b41838f8e
|
Fixing the way `Response` object handles sub items in some edge cases
|
<1>:<del> super().__init__(text=text, body=content, url=automatch_domain or url, encoding=encoding, **adaptor_arguments)
<2>:<del>
<8>:<add> super().__init__(text=text, body=body, url=automatch_domain or url, encoding=encoding, **adaptor_arguments)
|
<s>class Response(Adaptor):
+ def __init__(self, url: str, text: str, body: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, encoding: str = 'utf-8', **adaptor_arguments: Dict):
- def __init__(self, url: str, text: str, content: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, adaptor_arguments: Dict, encoding: str = 'utf-8'):
<0> automatch_domain = adaptor_arguments.pop('automatch_domain', None)
<1> super().__init__(text=text, body=content, url=automatch_domain or url, encoding=encoding, **adaptor_arguments)
<2>
<3> self.status = status
<4> self.reason = reason
<5> self.cookies = cookies
<6> self.headers = headers
<7> self.request_headers = request_headers
<8> # For backward compatibility
<9> self.adaptor = self
<10>
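A sketch of the call shape after this change: the raw bytes travel under `body` instead of `content`, and what used to be the `adaptor_arguments` dict is now passed as plain keyword arguments:

    response = Response(
        url='https://example.com',
        text='<html><body>ok</body></html>',
        body=b'<html><body>ok</body></html>',
        status=200,
        reason='OK',
        cookies={},
        headers={'content-type': 'text/html'},
        request_headers={},
        encoding='utf-8',
        keep_comments=False,  # anything extra here plays the role of the old adaptor_arguments dict
        auto_match=False,
    )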
|
===========unchanged ref 0===========
at: scrapling.parser.Adaptor
__slots__ = (
'url', 'encoding', '__auto_match_enabled', '_root', '_storage', '__debug',
'__keep_comments', '__huge_tree_enabled', '__attributes', '__text', '__tag',
)
__init__(self, text: Optional[str]=None, url: Optional[str]=None, body: bytes=b"", encoding: str="utf8", huge_tree: bool=True, root: Optional[html.HtmlElement]=None, keep_comments: Optional[bool]=False, auto_match: Optional[bool]=True, storage: Any=SQLiteStorageSystem, storage_args: Optional[Dict]=None, debug: Optional[bool]=True)
__init__(text: Optional[str]=None, url: Optional[str]=None, body: bytes=b"", encoding: str="utf8", huge_tree: bool=True, root: Optional[html.HtmlElement]=None, keep_comments: Optional[bool]=False, auto_match: Optional[bool]=True, storage: Any=SQLiteStorageSystem, storage_args: Optional[Dict]=None, debug: Optional[bool]=True)
body = html_content
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
at: typing.MutableMapping
pop(key: _KT) -> _VT
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
===========changed ref 0===========
<s> None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
+ **kwargs
):
"""The main class that works as a wrapper for the HTML input data. Using this class, you can search for elements
with expressions in CSS, XPath, or with simply text. Check the docs for more info.
Here we try to extend module ``lxml.html.HtmlElement`` while maintaining a simpler interface, We are not
inheriting from the ``lxml.html.HtmlElement`` because it's not pickleable which makes a lot of reference jobs
not possible. You can test it here and see code explodes with `AssertionError: invalid Element proxy at...`.
It's an old issue with lxml, see `this entry <https://bugs.launchpad.net/lxml/+bug/736708>`
:param text: HTML body passed as text.
:param url: allows storing a URL with the html data for retrieving later.
:param body: HTML body as ``bytes`` object. It can be used instead of the ``text`` argument.
:param encoding: The encoding type that will be used in HTML parsing, default is `UTF-8`
:param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls
libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion.
:param root: Used internally to pass etree objects instead of text/body arguments, it takes highest priority.
Don't use it unless you know what you are doing!
:param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons
:param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
</s>
===========changed ref 1===========
<s>: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
+ **kwargs
):
# offset: 1
<s> for obvious reasons
:param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
priority over all auto-match related arguments/functions in the class.
:param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info.
:param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class.
If empty, default values will be used.
:param debug: Enable debug mode
"""
if root is None and not body and text is None:
raise ValueError("Adaptor class needs text, body, or root arguments to work")
self.__text = None
if root is None:
if text is None:
if not body or not isinstance(body, bytes):
raise TypeError(f"body argument must be valid and of type bytes, got {body.__class__}")
body = body.replace(b"\x00", b"").strip()
else:
if not isinstance(text, str):
raise TypeError(f"text argument must be of type str, got {text.__class__}")
body = text.strip().replace("\x00", "").encode(encoding) or b"<html/>"
# https://lxml.de/api/lxml.etree.HTMLParser-class.html
parser = html.HTMLParser(
recover=True, remove_blank_text=True, remove_comments=(keep_comments is False</s>
===========changed ref 2===========
<s>: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
+ **kwargs
):
# offset: 2
<s>=encoding,
compact=True, huge_tree=huge_tree, default_doctype=True
)
self._root = etree.fromstring(body, parser=parser, base_url=url)
if is_jsonable(text or body.decode()):
self.__text = TextHandler(text or body.decode())
else:
# All html types inherits from HtmlMixin so this to check for all at once
if not issubclass(type(root), html.HtmlMixin):
raise TypeError(
f"Root have to be a valid element of `html` module types to work, not of type {type(root)}"
)
self._root = root
setup_basic_logging(level='debug' if debug else 'info')
self.__auto_match_enabled = auto_match
if self.__auto_match_enabled:
if not storage_args:
storage_args = {
'storage_file': os.path.join(os.path.dirname(__file__), 'elements_storage.db'),
'url': url
}
if not hasattr(storage, '__wrapped__'):
raise ValueError("Storage class must be wrapped with cache decorator, see docs for info")
if not issubclass(storage.__wrapped__, StorageSystemMixin):
raise ValueError("Storage system must be inherited from class `StorageSystemMixin`")
self._storage = storage(**storage_args)
self.__keep_comments =</s>
|
scrapling.engines.static/StaticEngine._prepare_response
|
Modified
|
D4Vinci~Scrapling
|
572df6b3b48955ffac484e4fa3691a9b41838f8e
|
Fixing the way `Response` object handles sub items in some edge cases
|
<8>:<add> body=response.content,
<del> content=response.content,
<15>:<add> **self.adaptor_arguments
<del> adaptor_arguments=self.adaptor_arguments
|
# module: scrapling.engines.static
class StaticEngine:
def _prepare_response(self, response: httpxResponse) -> Response:
<0> """Takes httpx response and generates `Response` object from it.
<1>
<2> :param response: httpx response object
<3> :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
<4> """
<5> return Response(
<6> url=str(response.url),
<7> text=response.text,
<8> content=response.content,
<9> status=response.status_code,
<10> reason=response.reason_phrase,
<11> encoding=response.encoding or 'utf-8',
<12> cookies=dict(response.cookies),
<13> headers=dict(response.headers),
<14> request_headers=dict(response.request.headers),
<15> adaptor_arguments=self.adaptor_arguments
<16> )
<17>
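For reference, a self-contained sketch of the same field mapping done directly against httpx, using the updated keyword names (`body=` plus splatted adaptor arguments); it only shows which httpx attributes feed which fields:

    import httpx

    r = httpx.get('https://example.com')
    mapped = dict(
        url=str(r.url),
        text=r.text,
        body=r.content,                  # bytes now travel under `body`
        status=r.status_code,
        reason=r.reason_phrase,
        encoding=r.encoding or 'utf-8',  # httpx can report None here
        cookies=dict(r.cookies),
        headers=dict(r.headers),
        request_headers=dict(r.request.headers),
    )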
|
===========changed ref 0===========
<s>class Response(Adaptor):
+ def __init__(self, url: str, text: str, body: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, encoding: str = 'utf-8', **adaptor_arguments: Dict):
- def __init__(self, url: str, text: str, content: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, adaptor_arguments: Dict, encoding: str = 'utf-8'):
automatch_domain = adaptor_arguments.pop('automatch_domain', None)
- super().__init__(text=text, body=content, url=automatch_domain or url, encoding=encoding, **adaptor_arguments)
-
self.status = status
self.reason = reason
self.cookies = cookies
self.headers = headers
self.request_headers = request_headers
+ super().__init__(text=text, body=body, url=automatch_domain or url, encoding=encoding, **adaptor_arguments)
# For backward compatibility
self.adaptor = self
===========changed ref 1===========
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
def __get_correct_result(
self, element: Union[html.HtmlElement, etree._ElementUnicodeResult]
) -> Union[TextHandler, html.HtmlElement, 'Adaptor', str]:
"""Used internally in all functions to convert results to type (Adaptor|Adaptors) when possible"""
if self._is_text_node(element):
# etree._ElementUnicodeResult basically inherit from `str` so it's fine
return TextHandler(str(element))
else:
if issubclass(type(element), html.HtmlMixin):
+
return self.__class__(
+ root=element,
+ text='', body=b'', # Since root argument is provided, both `text` and `body` will be ignored so this is just a filler
+ url=self.url, encoding=self.encoding, auto_match=self.__auto_match_enabled,
- root=element, url=self.url, encoding=self.encoding, auto_match=self.__auto_match_enabled,
keep_comments=True, # if the comments are already removed in initialization, no need to try to delete them in sub-elements
+ huge_tree=self.__huge_tree_enabled, debug=self.__debug,
- huge_tree=self.__huge_tree_enabled, debug=self.__debug
+ **self.__response_data
)
return element
===========changed ref 2===========
<s> None,
url: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
+ **kwargs
):
"""The main class that works as a wrapper for the HTML input data. Using this class, you can search for elements
with expressions in CSS, XPath, or with simply text. Check the docs for more info.
Here we try to extend module ``lxml.html.HtmlElement`` while maintaining a simpler interface, We are not
inheriting from the ``lxml.html.HtmlElement`` because it's not pickleable which makes a lot of reference jobs
not possible. You can test it here and see code explodes with `AssertionError: invalid Element proxy at...`.
It's an old issue with lxml, see `this entry <https://bugs.launchpad.net/lxml/+bug/736708>`
:param text: HTML body passed as text.
:param url: allows storing a URL with the html data for retrieving later.
:param body: HTML body as ``bytes`` object. It can be used instead of the ``text`` argument.
:param encoding: The encoding type that will be used in HTML parsing, default is `UTF-8`
:param huge_tree: Enabled by default, should always be enabled when parsing large HTML documents. This controls
libxml2 feature that forbids parsing certain large documents to protect from possible memory exhaustion.
:param root: Used internally to pass etree objects instead of text/body arguments, it takes highest priority.
Don't use it unless you know what you are doing!
:param keep_comments: While parsing the HTML body, drop comments or not. Disabled by default for obvious reasons
:param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
</s>
===========changed ref 3===========
<s>: Optional[str] = None,
body: bytes = b"",
encoding: str = "utf8",
huge_tree: bool = True,
root: Optional[html.HtmlElement] = None,
keep_comments: Optional[bool] = False,
auto_match: Optional[bool] = True,
storage: Any = SQLiteStorageSystem,
storage_args: Optional[Dict] = None,
debug: Optional[bool] = True,
+ **kwargs
):
# offset: 1
<s> for obvious reasons
:param auto_match: Globally turn-off the auto-match feature in all functions, this argument takes higher
priority over all auto-match related arguments/functions in the class.
:param storage: The storage class to be passed for auto-matching functionalities, see ``Docs`` for more info.
:param storage_args: A dictionary of ``argument->value`` pairs to be passed for the storage class.
If empty, default values will be used.
:param debug: Enable debug mode
"""
if root is None and not body and text is None:
raise ValueError("Adaptor class needs text, body, or root arguments to work")
self.__text = None
if root is None:
if text is None:
if not body or not isinstance(body, bytes):
raise TypeError(f"body argument must be valid and of type bytes, got {body.__class__}")
body = body.replace(b"\x00", b"").strip()
else:
if not isinstance(text, str):
raise TypeError(f"text argument must be of type str, got {text.__class__}")
body = text.strip().replace("\x00", "").encode(encoding) or b"<html/>"
# https://lxml.de/api/lxml.etree.HTMLParser-class.html
parser = html.HTMLParser(
recover=True, remove_blank_text=True, remove_comments=(keep_comments is False</s>
|
scrapling.engines.pw/PlaywrightEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
572df6b3b48955ffac484e4fa3691a9b41838f8e
|
Fixing the way `Response` object handles sub items in some edge cases
|
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
<4> """
<5> if not self.stealth:
<6> from playwright.sync_api import sync_playwright
<7> else:
<8> from rebrowser_playwright.sync_api import sync_playwright
<9>
<10> with sync_playwright() as p:
<11> # Handle the UserAgent early
<12> if self.useragent:
<13> extra_headers = {}
<14> useragent = self.useragent
<15> else:
<16> extra_headers = generate_headers(browser_mode=True)
<17> useragent = extra_headers.get('User-Agent')
<18>
<19> # Prepare the flags before diving
<20> flags = DEFAULT_STEALTH_FLAGS
<21> if self.hide_canvas:
<22> flags += ['--fingerprinting-canvas-image-data-noise']
<23> if self.disable_webgl:
<24> flags += ['--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2']
<25>
<26> # Creating the browser
<27> if self.cdp_url:
<28> cdp_url = self._cdp_url_logic(flags if self.stealth else None)
<29> browser = p.chromium.connect_over_cdp(endpoint_url=cdp_url)
<30> else:
<31> if self.stealth:
<32> browser = p.chromium.launch(headless=self.headless, args=flags, ignore_default_args=['--enable-automation'], chromium_sandbox=True)
<33> else:
<34> browser = p.chromium.launch(headless=self.headless, ignore_default_args=['--enable</s>
|
===========below chunk 0===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 1
# Creating the context
if self.stealth:
context = browser.new_context(
locale='en-US',
is_mobile=False,
has_touch=False,
proxy=self.proxy,
color_scheme='dark', # Bypasses the 'prefersLightColor' check in creepjs
user_agent=useragent,
device_scale_factor=2,
# I'm thinking about disabling it to rest from all Service Workers headache but let's keep it as it is for now
service_workers="allow",
ignore_https_errors=True,
extra_http_headers=extra_headers,
screen={"width": 1920, "height": 1080},
viewport={"width": 1920, "height": 1080},
permissions=["geolocation", 'notifications'],
)
else:
context = browser.new_context(
color_scheme='dark',
user_agent=useragent,
device_scale_factor=2,
extra_http_headers=extra_headers
)
# Finally we are in business
page = context.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.extra_headers:
page.set_extra_http_headers(self.extra_headers)
if self.disable_resources:
page.route("**/*", intercept_route)
if self.stealth:
# Basic bypasses nothing fancy as I'm still working on it
# But with adding these bypasses to the above config, it bypasses many online tests like
# https://bot.sannysoft.com/
# https://kaliiiiiiiiii.github.io/brotector/
# https://pixelscan.net/
# https://iphey.com/
# https://www.b</s>
===========below chunk 1===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 2
<s>ector/
# https://pixelscan.net/
# https://iphey.com/
# https://www.browserscan.net/bot-detection <== this one also checks for the CDP runtime fingerprint
# https://arh.antoinevastel.com/bots/areyouheadless/
# https://prescience-data.github.io/execution-monitor.html
page.add_init_script(path=js_bypass_path('webdriver_fully.js'))
page.add_init_script(path=js_bypass_path('window_chrome.js'))
page.add_init_script(path=js_bypass_path('navigator_plugins.js'))
page.add_init_script(path=js_bypass_path('pdf_viewer.js'))
page.add_init_script(path=js_bypass_path('notification_permission.js'))
page.add_init_script(path=js_bypass_path('screen_props.js'))
page.add_init_script(path=js_bypass_path('playwright_fingerprint.js'))
res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
page.wait_for_load_state(state="domcontentloaded")
if self.network_idle:
page.wait_for_load_state('networkidle')
page = self.page_action(page)
if self.wait_selector and type(self.wait_selector) is str:
waiter = page.locator(self.wait_selector)
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding</s>
===========below chunk 2===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 3
<s>utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=page.content(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
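`intercept_route` itself lives in `scrapling.engines.toolbelt.navigation` and is not shown in this chunk; a hedged sketch of what a resource-dropping handler of that shape can look like with Playwright's routing API:

    BLOCKED_TYPES = {'font', 'image', 'media', 'beacon', 'object', 'imageset',
                     'texttrack', 'websocket', 'csp_report', 'stylesheet'}

    def drop_heavy_resources(route):
        # Abort requests for resource types we do not need and let the rest pass through
        if route.request.resource_type in BLOCKED_TYPES:
            route.abort()
        else:
            route.continue_()

    # registered the same way as above: page.route("**/*", drop_heavy_resources)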
===========unchanged ref 0===========
at: scrapling.engines.constants
DEFAULT_STEALTH_FLAGS = [
# Explanation: https://peter.sh/experiments/chromium-command-line-switches/
# Generally this will make the browser faster and less detectable
'--no-pings',
'--incognito',
'--test-type',
'--lang=en-US',
'--mute-audio',
'--no-first-run',
'--disable-sync',
'--hide-scrollbars',
'--disable-logging',
'--start-maximized', # For headless check bypass
'--enable-async-dns',
'--disable-breakpad',
'--disable-infobars',
'--accept-lang=en-US',
'--use-mock-keychain',
'--disable-translate',
'--disable-extensions',
'--disable-voice-input',
'--window-position=0,0',
'--disable-wake-on-wifi',
'--ignore-gpu-blocklist',
'--enable-tcp-fast-open',
'--enable-web-bluetooth',
'--disable-hang-monitor',
'--password-store=basic',
'--disable-cloud-import',
'--disable-default-apps',
'--disable-print-preview',
'--disable-dev-shm-usage',
'--disable-popup-blocking',
'--metrics-recording-only',
'--disable-crash-reporter',
'--disable-partial-raster',
'--disable-gesture-typing',
'--disable-checker-imaging',
'--disable-prompt-on-repost',
'--force-color-profile=srgb',
'--font-render-hinting=none',
'--no-default-browser-check',
'--aggressive-cache-discard',
'--disable-component-update',
'--disable-cookie-encryption',
'--disable-domain-reliability',
'--disable-threaded-animation',
'--disable-threaded-scrolling',
# '--disable-reading-from-canvas',</s>
|
|
scrapling.engines.camo/CamoufoxEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
572df6b3b48955ffac484e4fa3691a9b41838f8e
|
Fixing the way `Response` object handles sub items in some edge cases
|
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
<4> """
<5> with Camoufox(
<6> proxy=self.proxy,
<7> addons=self.addons,
<8> headless=self.headless,
<9> humanize=self.humanize,
<10> i_know_what_im_doing=True, # To turn warnings off with the user configurations
<11> allow_webgl=self.allow_webgl,
<12> block_webrtc=self.block_webrtc,
<13> block_images=self.block_images, # Careful! it makes some websites never finish loading at all, like stackoverflow, even in headful
<14> os=None if self.os_randomize else get_os_name(),
<15> ) as browser:
<16> page = browser.new_page()
<17> page.set_default_navigation_timeout(self.timeout)
<18> page.set_default_timeout(self.timeout)
<19> if self.disable_resources:
<20> page.route("**/*", intercept_route)
<21>
<22> if self.extra_headers:
<23> page.set_extra_http_headers(self.extra_headers)
<24>
<25> res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
<26> page.wait_for_load_state(state="domcontentloaded")
<27> if self.network_idle:
<28> page.wait_for_load_state('networkidle')
<29>
<30> page = self.page_action(page)
<31>
<32> if self.wait_selector and type(self.wait_selector) is str:
<33> waiter = page.locator(self</s>
|
===========below chunk 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=page.content(),
content=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
adaptor_arguments=self.adaptor_arguments
)
page.close()
return response
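A small sketch of the `wait_selector`/`wait_selector_state` mechanics used above, written directly against Playwright's locator API:

    def wait_for_selector(page, selector, state='attached'):
        # Same pattern as the engine: build a locator and block until it reaches the state
        waiter = page.locator(selector)
        waiter.wait_for(state=state)  # 'attached', 'detached', 'visible' or 'hidden'
        return page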
===========unchanged ref 0===========
at: scrapling.engines.camo.CamoufoxEngine.__init__
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.google_search = bool(google_search)
self.os_randomize = bool(os_randomize)
self.extra_headers = extra_headers or {}
self.proxy = construct_proxy_dict(proxy)
self.addons = addons or []
self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
self.page_action = do_nothing
self.page_action = page_action
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, content: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, adaptor_arguments: Dict, encoding: str='utf-8')
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url: str) -> str
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
get_os_name() -> Union[str, None]
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
at: scrapling.engines.toolbelt.navigation
intercept_route(route: Route) -> Union[Route, None]
===========changed ref 0===========
<s>class Response(Adaptor):
+ def __init__(self, url: str, text: str, body: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, encoding: str = 'utf-8', **adaptor_arguments: Dict):
- def __init__(self, url: str, text: str, content: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, adaptor_arguments: Dict, encoding: str = 'utf-8'):
automatch_domain = adaptor_arguments.pop('automatch_domain', None)
- super().__init__(text=text, body=content, url=automatch_domain or url, encoding=encoding, **adaptor_arguments)
-
self.status = status
self.reason = reason
self.cookies = cookies
self.headers = headers
self.request_headers = request_headers
+ super().__init__(text=text, body=body, url=automatch_domain or url, encoding=encoding, **adaptor_arguments)
# For back-ward compatibility
self.adaptor = self
===========changed ref 1===========
# module: scrapling.engines.static
class StaticEngine:
def _prepare_response(self, response: httpxResponse) -> Response:
"""Takes httpx response and generates `Response` object from it.
:param response: httpx response object
:return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
"""
return Response(
url=str(response.url),
text=response.text,
+ body=response.content,
- content=response.content,
status=response.status_code,
reason=response.reason_phrase,
encoding=response.encoding or 'utf-8',
cookies=dict(response.cookies),
headers=dict(response.headers),
request_headers=dict(response.request.headers),
+ **self.adaptor_arguments
- adaptor_arguments=self.adaptor_arguments
)
===========changed ref 2===========
# module: scrapling.parser
class Adaptor(SelectorsGeneration):
def __get_correct_result(
self, element: Union[html.HtmlElement, etree._ElementUnicodeResult]
) -> Union[TextHandler, html.HtmlElement, 'Adaptor', str]:
"""Used internally in all functions to convert results to type (Adaptor|Adaptors) when possible"""
if self._is_text_node(element):
# etree._ElementUnicodeResult basically inherit from `str` so it's fine
return TextHandler(str(element))
else:
if issubclass(type(element), html.HtmlMixin):
+
return self.__class__(
+ root=element,
+ text='', body=b'', # Since root argument is provided, both `text` and `body` will be ignored so this is just a filler
+ url=self.url, encoding=self.encoding, auto_match=self.__auto_match_enabled,
- root=element, url=self.url, encoding=self.encoding, auto_match=self.__auto_match_enabled,
keep_comments=True, # if the comments are already removed in initialization, no need to try to delete them in sub-elements
+ huge_tree=self.__huge_tree_enabled, debug=self.__debug,
- huge_tree=self.__huge_tree_enabled, debug=self.__debug
+ **self.__response_data
)
return element
|
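Both browser engines above derive the response encoding from the Content-Type header with the same one-liner. Below is a minimal, self-contained sketch of that parsing step; the helper name charset_from_content_type is illustrative and not part of Scrapling's API.

# --- illustrative sketch, not part of the recorded diff or of Scrapling's API ---
def charset_from_content_type(content_type: str, default: str = 'utf-8') -> str:
    """Extract the charset from a Content-Type header value, falling back to a default."""
    if 'charset=' in content_type.lower():
        # 'text/html; charset=ISO-8859-1' -> 'iso-8859-1'
        return content_type.lower().split('charset=')[-1].split(';')[0].strip()
    return default

assert charset_from_content_type('text/html; charset=UTF-8') == 'utf-8'
assert charset_from_content_type('application/json') == 'utf-8'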
|
scrapling.engines.pw/PlaywrightEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
4c74d9bc972cfce3b5eb46f92593b95b3b4ea095
|
Fixing a bug with reading response bytes in playwright/camoufox when `network_idle` is used
|
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
<4> """
<5> if not self.stealth:
<6> from playwright.sync_api import sync_playwright
<7> else:
<8> from rebrowser_playwright.sync_api import sync_playwright
<9>
<10> with sync_playwright() as p:
<11> # Handle the UserAgent early
<12> if self.useragent:
<13> extra_headers = {}
<14> useragent = self.useragent
<15> else:
<16> extra_headers = generate_headers(browser_mode=True)
<17> useragent = extra_headers.get('User-Agent')
<18>
<19> # Prepare the flags before diving
<20> flags = DEFAULT_STEALTH_FLAGS
<21> if self.hide_canvas:
<22> flags += ['--fingerprinting-canvas-image-data-noise']
<23> if self.disable_webgl:
<24> flags += ['--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2']
<25>
<26> # Creating the browser
<27> if self.cdp_url:
<28> cdp_url = self._cdp_url_logic(flags if self.stealth else None)
<29> browser = p.chromium.connect_over_cdp(endpoint_url=cdp_url)
<30> else:
<31> if self.stealth:
<32> browser = p.chromium.launch(headless=self.headless, args=flags, ignore_default_args=['--enable-automation'], chromium_sandbox=True)
<33> else:
<34> browser = p.chromium.launch(headless=self.headless, ignore_default_args=['--enable</s>
|
===========below chunk 0===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 1
# Creating the context
if self.stealth:
context = browser.new_context(
locale='en-US',
is_mobile=False,
has_touch=False,
proxy=self.proxy,
color_scheme='dark', # Bypasses the 'prefersLightColor' check in creepjs
user_agent=useragent,
device_scale_factor=2,
# I'm thinking about disabling it to rest from all Service Workers headache but let's keep it as it is for now
service_workers="allow",
ignore_https_errors=True,
extra_http_headers=extra_headers,
screen={"width": 1920, "height": 1080},
viewport={"width": 1920, "height": 1080},
permissions=["geolocation", 'notifications'],
)
else:
context = browser.new_context(
color_scheme='dark',
user_agent=useragent,
device_scale_factor=2,
extra_http_headers=extra_headers
)
# Finally we are in business
page = context.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.extra_headers:
page.set_extra_http_headers(self.extra_headers)
if self.disable_resources:
page.route("**/*", intercept_route)
if self.stealth:
# Basic bypasses nothing fancy as I'm still working on it
# But with adding these bypasses to the above config, it bypasses many online tests like
# https://bot.sannysoft.com/
# https://kaliiiiiiiiii.github.io/brotector/
# https://pixelscan.net/
# https://iphey.com/
# https://www.b</s>
===========below chunk 1===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 2
<s>ector/
# https://pixelscan.net/
# https://iphey.com/
# https://www.browserscan.net/bot-detection <== this one also checks for the CDP runtime fingerprint
# https://arh.antoinevastel.com/bots/areyouheadless/
# https://prescience-data.github.io/execution-monitor.html
page.add_init_script(path=js_bypass_path('webdriver_fully.js'))
page.add_init_script(path=js_bypass_path('window_chrome.js'))
page.add_init_script(path=js_bypass_path('navigator_plugins.js'))
page.add_init_script(path=js_bypass_path('pdf_viewer.js'))
page.add_init_script(path=js_bypass_path('notification_permission.js'))
page.add_init_script(path=js_bypass_path('screen_props.js'))
page.add_init_script(path=js_bypass_path('playwright_fingerprint.js'))
res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
page.wait_for_load_state(state="domcontentloaded")
if self.network_idle:
page.wait_for_load_state('networkidle')
page = self.page_action(page)
if self.wait_selector and type(self.wait_selector) is str:
waiter = page.locator(self.wait_selector)
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding</s>
===========below chunk 2===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 3
<s>utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=page.content(),
body=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
**self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.constants
DEFAULT_STEALTH_FLAGS = [
# Explanation: https://peter.sh/experiments/chromium-command-line-switches/
# Generally this will make the browser faster and less detectable
'--no-pings',
'--incognito',
'--test-type',
'--lang=en-US',
'--mute-audio',
'--no-first-run',
'--disable-sync',
'--hide-scrollbars',
'--disable-logging',
'--start-maximized', # For headless check bypass
'--enable-async-dns',
'--disable-breakpad',
'--disable-infobars',
'--accept-lang=en-US',
'--use-mock-keychain',
'--disable-translate',
'--disable-extensions',
'--disable-voice-input',
'--window-position=0,0',
'--disable-wake-on-wifi',
'--ignore-gpu-blocklist',
'--enable-tcp-fast-open',
'--enable-web-bluetooth',
'--disable-hang-monitor',
'--password-store=basic',
'--disable-cloud-import',
'--disable-default-apps',
'--disable-print-preview',
'--disable-dev-shm-usage',
'--disable-popup-blocking',
'--metrics-recording-only',
'--disable-crash-reporter',
'--disable-partial-raster',
'--disable-gesture-typing',
'--disable-checker-imaging',
'--disable-prompt-on-repost',
'--force-color-profile=srgb',
'--font-render-hinting=none',
'--no-default-browser-check',
'--aggressive-cache-discard',
'--disable-component-update',
'--disable-cookie-encryption',
'--disable-domain-reliability',
'--disable-threaded-animation',
'--disable-threaded-scrolling',
# '--disable-reading-from-canvas',</s>
|
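The commit above fixes reading response bytes when `network_idle` is used, which suggests `res.body()` can become unreliable once extra waits run after navigation. Below is a hedged, standalone sketch of the defensive pattern of reading the bytes immediately after `goto`; it uses plain Playwright rather than Scrapling, and `https://example.org` is a placeholder URL.

# --- standalone sketch using Playwright's sync API; not Scrapling code ---
from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch(headless=True)
    page = browser.new_page()
    res = page.goto("https://example.org")        # goto returns the main response (may be None)
    body_bytes = res.body() if res else b''       # read the raw bytes before any further waiting
    page.wait_for_load_state("networkidle")       # later waits no longer affect the cached bytes
    html_text = page.content()                    # the rendered DOM can differ from the raw body
    browser.close()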
|
scrapling.engines.camo/CamoufoxEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
4c74d9bc972cfce3b5eb46f92593b95b3b4ea095
|
Fixing a bug with reading response bytes in playwright/camoufox when `network_idle` is used
|
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
<4> """
<5> with Camoufox(
<6> proxy=self.proxy,
<7> addons=self.addons,
<8> headless=self.headless,
<9> humanize=self.humanize,
<10> i_know_what_im_doing=True, # To turn warnings off with the user configurations
<11> allow_webgl=self.allow_webgl,
<12> block_webrtc=self.block_webrtc,
<13> block_images=self.block_images, # Careful! it makes some websites not finish loading at all, like stackoverflow, even in headful mode
<14> os=None if self.os_randomize else get_os_name(),
<15> ) as browser:
<16> page = browser.new_page()
<17> page.set_default_navigation_timeout(self.timeout)
<18> page.set_default_timeout(self.timeout)
<19> if self.disable_resources:
<20> page.route("**/*", intercept_route)
<21>
<22> if self.extra_headers:
<23> page.set_extra_http_headers(self.extra_headers)
<24>
<25> res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
<26> page.wait_for_load_state(state="domcontentloaded")
<27> if self.network_idle:
<28> page.wait_for_load_state('networkidle')
<29>
<30> page = self.page_action(page)
<31>
<32> if self.wait_selector and type(self.wait_selector) is str:
<33> waiter = page.locator(self</s>
|
===========below chunk 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=page.content(),
body=res.body(),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
**self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.camo.CamoufoxEngine.__init__
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.google_search = bool(google_search)
self.os_randomize = bool(os_randomize)
self.extra_headers = extra_headers or {}
self.proxy = construct_proxy_dict(proxy)
self.addons = addons or []
self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
self.page_action = do_nothing
self.page_action = page_action
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, body: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, encoding: str='utf-8', *, huge_tree: bool=True, root: Optional[html.HtmlElement]=None, keep_comments: Optional[bool]=False, auto_match: Optional[bool]=True, storage: Any=SQLiteStorageSystem, storage_args: Optional[Dict]=None, debug: Optional[bool]=True, **kwargs)
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url: str) -> str
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
get_os_name() -> Union[str, None]
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
===========unchanged ref 1===========
at: scrapling.engines.toolbelt.navigation
intercept_route(route: Route) -> Union[Route, None]
===========changed ref 0===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
"""Opens up the browser and do your request based on your chosen options.
:param url: Target url.
:return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
"""
if not self.stealth:
from playwright.sync_api import sync_playwright
else:
from rebrowser_playwright.sync_api import sync_playwright
with sync_playwright() as p:
# Handle the UserAgent early
if self.useragent:
extra_headers = {}
useragent = self.useragent
else:
extra_headers = generate_headers(browser_mode=True)
useragent = extra_headers.get('User-Agent')
# Prepare the flags before diving
flags = DEFAULT_STEALTH_FLAGS
if self.hide_canvas:
flags += ['--fingerprinting-canvas-image-data-noise']
if self.disable_webgl:
flags += ['--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2']
# Creating the browser
if self.cdp_url:
cdp_url = self._cdp_url_logic(flags if self.stealth else None)
browser = p.chromium.connect_over_cdp(endpoint_url=cdp_url)
else:
if self.stealth:
browser = p.chromium.launch(headless=self.headless, args=flags, ignore_default_args=['--enable-automation'], chromium_sandbox=True)
else:
browser = p.chromium.launch(headless=self.headless, ignore_default_args=['--enable-automation'])
# Creating the context
if self.stealth:
context = browser.new_context(
locale='en</s>
===========changed ref 1===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 1
<s>
# Creating the context
if self.stealth:
context = browser.new_context(
locale='en-US',
is_mobile=False,
has_touch=False,
proxy=self.proxy,
color_scheme='dark', # Bypasses the 'prefersLightColor' check in creepjs
user_agent=useragent,
device_scale_factor=2,
# I'm thinking about disabling it to rest from all Service Workers headache but let's keep it as it is for now
service_workers="allow",
ignore_https_errors=True,
extra_http_headers=extra_headers,
screen={"width": 1920, "height": 1080},
viewport={"width": 1920, "height": 1080},
permissions=["geolocation", 'notifications'],
)
else:
context = browser.new_context(
color_scheme='dark',
user_agent=useragent,
device_scale_factor=2,
extra_http_headers=extra_headers
)
# Finally we are in business
page = context.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.extra_headers:
page.set_extra_http_headers(self.extra_headers)
if self.disable_resources:
page.route("**/*", intercept_route)
if self.stealth:
# Basic bypasses nothing fancy as I'm still working on it
# But with adding these bypasses to the above config, it bypasses many online tests like
# https://bot.sannysoft.com/
# https://kaliiiiiiiiii.github.io</s>
|
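For context, `generate_convincing_referer(url)` used in the fetch code above fakes an organic Google-search entry point. The sketch below is an assumption about the general technique only; Scrapling's actual helper may build the referer differently.

# --- illustrative only; not Scrapling's actual generate_convincing_referer ---
from urllib.parse import urlparse

def fake_search_referer(url: str) -> str:
    """Build a Google-search-style referer from the target site's domain."""
    hostname = urlparse(url).hostname or ''
    website_name = hostname.replace('www.', '').split('.')[0]
    return f'https://www.google.com/search?q={website_name}'

print(fake_search_referer('https://www.stackoverflow.com/questions'))  # https://www.google.com/search?q=stackoverflow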
|
scrapling.engines.camo/CamoufoxEngine.fetch
|
Modified
|
D4Vinci~Scrapling
|
19ad82caef95030478bc8c28f72902b38745feb5
|
Calculate status text manually if it's not returned by PlayWright API
|
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
<0> """Opens up the browser and do your request based on your chosen options.
<1>
<2> :param url: Target url.
<3> :return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
<4> """
<5> with Camoufox(
<6> proxy=self.proxy,
<7> addons=self.addons,
<8> headless=self.headless,
<9> humanize=self.humanize,
<10> i_know_what_im_doing=True, # To turn warnings off with the user configurations
<11> allow_webgl=self.allow_webgl,
<12> block_webrtc=self.block_webrtc,
<13> block_images=self.block_images, # Careful! it makes some websites not finish loading at all, like stackoverflow, even in headful mode
<14> os=None if self.os_randomize else get_os_name(),
<15> ) as browser:
<16> page = browser.new_page()
<17> page.set_default_navigation_timeout(self.timeout)
<18> page.set_default_timeout(self.timeout)
<19> if self.disable_resources:
<20> page.route("**/*", intercept_route)
<21>
<22> if self.extra_headers:
<23> page.set_extra_http_headers(self.extra_headers)
<24>
<25> res = page.goto(url, referer=generate_convincing_referer(url) if self.google_search else None)
<26> page.wait_for_load_state(state="domcontentloaded")
<27> if self.network_idle:
<28> page.wait_for_load_state('networkidle')
<29>
<30> page = self.page_action(page)
<31>
<32> if self.wait_selector and type(self.wait_selector) is str:
<33> waiter = page.locator(self</s>
|
===========below chunk 0===========
# module: scrapling.engines.camo
class CamoufoxEngine:
def fetch(self, url: str) -> Response:
# offset: 1
waiter.wait_for(state=self.wait_selector_state)
content_type = res.headers.get('content-type', '')
# Parse charset from content-type
encoding = 'utf-8' # default encoding
if 'charset=' in content_type.lower():
encoding = content_type.lower().split('charset=')[-1].split(';')[0].strip()
response = Response(
url=res.url,
text=page.content(),
body=page.content().encode('utf-8'),
status=res.status,
reason=res.status_text,
encoding=encoding,
cookies={cookie['name']: cookie['value'] for cookie in page.context.cookies()},
headers=res.all_headers(),
request_headers=res.request.all_headers(),
**self.adaptor_arguments
)
page.close()
return response
===========unchanged ref 0===========
at: scrapling.engines.camo.CamoufoxEngine.__init__
self.headless = headless
self.block_images = bool(block_images)
self.disable_resources = bool(disable_resources)
self.block_webrtc = bool(block_webrtc)
self.allow_webgl = bool(allow_webgl)
self.network_idle = bool(network_idle)
self.google_search = bool(google_search)
self.os_randomize = bool(os_randomize)
self.extra_headers = extra_headers or {}
self.proxy = construct_proxy_dict(proxy)
self.addons = addons or []
self.humanize = humanize
self.timeout = check_type_validity(timeout, [int, float], 30000)
self.page_action = do_nothing
self.page_action = page_action
self.wait_selector = wait_selector
self.wait_selector_state = wait_selector_state
at: scrapling.engines.toolbelt.custom
Response(url: str, text: str, body: bytes, status: int, reason: str, cookies: Dict, headers: Dict, request_headers: Dict, encoding: str='utf-8', *, huge_tree: bool=True, root: Optional[html.HtmlElement]=None, keep_comments: Optional[bool]=False, auto_match: Optional[bool]=True, storage: Any=SQLiteStorageSystem, storage_args: Optional[Dict]=None, debug: Optional[bool]=True, **kwargs)
at: scrapling.engines.toolbelt.fingerprints
generate_convincing_referer(url: str) -> str
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
get_os_name() -> Union[str, None]
_lru_cache_wrapper(*args: Hashable, **kwargs: Hashable) -> _T
===========unchanged ref 1===========
at: scrapling.engines.toolbelt.navigation
intercept_route(route: Route) -> Union[Route, None]
===========changed ref 0===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
"""Opens up the browser and do your request based on your chosen options.
:param url: Target url.
:return: A `Response` object that is the same as `Adaptor` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
"""
if not self.stealth:
from playwright.sync_api import sync_playwright
else:
from rebrowser_playwright.sync_api import sync_playwright
with sync_playwright() as p:
# Handle the UserAgent early
if self.useragent:
extra_headers = {}
useragent = self.useragent
else:
extra_headers = generate_headers(browser_mode=True)
useragent = extra_headers.get('User-Agent')
# Prepare the flags before diving
flags = DEFAULT_STEALTH_FLAGS
if self.hide_canvas:
flags += ['--fingerprinting-canvas-image-data-noise']
if self.disable_webgl:
flags += ['--disable-webgl', '--disable-webgl-image-chromium', '--disable-webgl2']
# Creating the browser
if self.cdp_url:
cdp_url = self._cdp_url_logic(flags if self.stealth else None)
browser = p.chromium.connect_over_cdp(endpoint_url=cdp_url)
else:
if self.stealth:
browser = p.chromium.launch(headless=self.headless, args=flags, ignore_default_args=['--enable-automation'], chromium_sandbox=True)
else:
browser = p.chromium.launch(headless=self.headless, ignore_default_args=['--enable-automation'])
# Creating the context
if self.stealth:
context = browser.new_context(
locale='en</s>
===========changed ref 1===========
# module: scrapling.engines.pw
class PlaywrightEngine:
def fetch(self, url: str) -> Response:
# offset: 1
<s>
# Creating the context
if self.stealth:
context = browser.new_context(
locale='en-US',
is_mobile=False,
has_touch=False,
proxy=self.proxy,
color_scheme='dark', # Bypasses the 'prefersLightColor' check in creepjs
user_agent=useragent,
device_scale_factor=2,
# I'm thinking about disabling it to rest from all Service Workers headache but let's keep it as it is for now
service_workers="allow",
ignore_https_errors=True,
extra_http_headers=extra_headers,
screen={"width": 1920, "height": 1080},
viewport={"width": 1920, "height": 1080},
permissions=["geolocation", 'notifications'],
)
else:
context = browser.new_context(
color_scheme='dark',
user_agent=useragent,
device_scale_factor=2,
extra_http_headers=extra_headers
)
# Finally we are in business
page = context.new_page()
page.set_default_navigation_timeout(self.timeout)
page.set_default_timeout(self.timeout)
if self.extra_headers:
page.set_extra_http_headers(self.extra_headers)
if self.disable_resources:
page.route("**/*", intercept_route)
if self.stealth:
# Basic bypasses nothing fancy as I'm still working on it
# But with adding these bypasses to the above config, it bypasses many online tests like
# https://bot.sannysoft.com/
# https://kaliiiiiiiiii.github.io</s>
|
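The commit above ("Calculate status text manually if it's not returned by PlayWright API") implies `res.status_text` can come back empty. Below is a hedged sketch of the obvious standard-library fallback; the helper name `status_text_or_fallback` is illustrative and not necessarily how Scrapling implements it.

# --- illustrative fallback; not necessarily Scrapling's implementation ---
from http import HTTPStatus

def status_text_or_fallback(status_code: int, status_text: str) -> str:
    """Return the reported reason phrase, or derive one from the status code."""
    if status_text:
        return status_text
    try:
        return HTTPStatus(status_code).phrase     # e.g. 200 -> 'OK', 404 -> 'Not Found'
    except ValueError:                            # unknown/non-standard status code
        return 'Unknown Status Code'

assert status_text_or_fallback(404, '') == 'Not Found'
assert status_text_or_fallback(200, 'OK') == 'OK'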
|
bitsandbytes.functional/optimizer_update_32bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
bb34fd50a1fec74e62beb6e23d51f0142c7d0ab6
|
Initial plumbing for skip_zeros.
|
<31>:<add> unorm_vec : torch.Tensor
<add> The tensor for the update norm.
<add> max_unorm : float
<add> The maximum update norm relative to the weight norm.
<add> skip_zeros : bool
<add> Whether to skip zero-valued gradients or not (default: False).
|
<s>: Tensor, state1: Tensor,
beta1: float, eps: float, step: int, lr: float,
state2: Tensor=None, beta2: float=0.0,
weight_decay: float=0.0, gnorm_scale: float=1.0,
+ unorm_vec: Tensor=None, max_unorm: float=0.0, skip_zeros=False) -> None:
- unorm_vec: Tensor=None, max_unorm: float=0.0) -> None:
<0> '''
<1> Performs an inplace optimizer update with one or two optimizer states.
<2>
<3> Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.
<4>
<5> Parameters
<6> ----------
<7> optimizer_name : str
<8> The name of the optimizer: {adam}.
<9> g : torch.Tensor
<10> Gradient tensor.
<11> p : torch.Tensor
<12> Parameter tensor.
<13> state1 : torch.Tensor
<14> Optimizer state 1.
<15> beta1 : float
<16> Optimizer beta1.
<17> eps : float
<18> Optimizer epsilon.
<19> weight_decay : float
<20> Weight decay.
<21> step : int
<22> Current optimizer step.
<23> lr : float
<24> The learning rate.
<25> state2 : torch.Tensor
<26> Optimizer state 2.
<27> beta2 : float
<28> Optimizer beta2.
<29> gnorm_scale : float
<30> The factor to rescale the gradient to the max clip value.
<31> '''
<32>
<33> param_norm = 0.0
<34> if max_unorm > 0.0:
<35> param_norm = torch.norm(p.data.float())
<36>
<37> if optimizer_name not in str2optimizer32bit:
<38> raise NotImplementError(f'Optimizer not implemented: {optimizer_name}. Choices: {",".join(str2optimizer32bit.keys())}')
<39>
<40> if g.dtype == torch.float32 and state1.dtype == torch.float32:
<41> str2optimizer32bit[optimizer_name][0](get_</s>
|
===========below chunk 0===========
<s>: Tensor,
beta1: float, eps: float, step: int, lr: float,
state2: Tensor=None, beta2: float=0.0,
weight_decay: float=0.0, gnorm_scale: float=1.0,
+ unorm_vec: Tensor=None, max_unorm: float=0.0, skip_zeros=False) -> None:
- unorm_vec: Tensor=None, max_unorm: float=0.0) -> None:
# offset: 1
ct.c_float(param_norm), ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps), ct.c_float(weight_decay),
ct.c_int32(step), ct.c_float(lr), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
elif g.dtype == torch.float16 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][1](get_ptr(g), get_ptr(p), get_ptr(state1), get_ptr(state2), get_ptr(unorm_vec), ct.c_float(max_unorm),
ct.c_float(param_norm), ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps), ct.c_float(weight_decay),
ct.c_int32(step), ct.c_float(lr), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
else:
raise ValueError(f'Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}')
|
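This commit only threads `skip_zeros` through to the CUDA kernels; per the new parameter docs above, the intended effect is that parameters whose gradient is exactly zero keep both their value and their optimizer state. Below is a reference-level sketch of that behaviour for a plain Adam step in pure PyTorch, purely illustrative and not the kernel code.

# --- reference sketch of the intended skip_zeros behaviour; not the CUDA kernel ---
import torch

def adam_step_skip_zeros(p, g, m, v, step, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    nonzero = g != 0                                           # entries that actually received a gradient
    m[nonzero] = beta1 * m[nonzero] + (1 - beta1) * g[nonzero]
    v[nonzero] = beta2 * v[nonzero] + (1 - beta2) * g[nonzero] ** 2
    m_hat = m[nonzero] / (1 - beta1 ** step)
    v_hat = v[nonzero] / (1 - beta2 ** step)
    p[nonzero] -= lr * m_hat / (v_hat.sqrt() + eps)
    # zero-gradient entries keep their parameter value and optimizer state untouched
    return p, m, v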
bitsandbytes.functional/optimizer_update_8bit_blockwise
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
bb34fd50a1fec74e62beb6e23d51f0142c7d0ab6
|
Initial plumbing for skip_zeros.
|
<4>:<add> get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale),
<del> get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
<5>:<add> ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
<9>:<add> get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale),
<del> get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
<10>:<add> ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
|
<s>,
beta1: float, beta2: float, eps: float,
step: int, lr: float, qmap1: Tensor, qmap2: Tensor,
+ absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0,
- absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0) -> None:
+ skip_zeros=False) -> None:
<0> if g.dtype == torch.float32 and state1.dtype == torch.uint8:
<1> str2optimizer8bit_blockwise[optimizer_name][0](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
<2> ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
<3> ct.c_int32(step), ct.c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
<4> get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
<5> elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
<6> str2optimizer8bit_blockwise[optimizer_name][1](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
<7> ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
<8> ct.c_int32(step), ct.c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
<9> get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
<10> else:
<11> raise ValueError(f'Gradient</s>
|
===========below chunk 0===========
<s>: float, beta2: float, eps: float,
step: int, lr: float, qmap1: Tensor, qmap2: Tensor,
+ absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0,
- absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0) -> None:
+ skip_zeros=False) -> None:
# offset: 1
===========changed ref 0===========
<s>: Tensor, state1: Tensor,
beta1: float, eps: float, step: int, lr: float,
state2: Tensor=None, beta2: float=0.0,
weight_decay: float=0.0, gnorm_scale: float=1.0,
+ unorm_vec: Tensor=None, max_unorm: float=0.0, skip_zeros=False) -> None:
- unorm_vec: Tensor=None, max_unorm: float=0.0) -> None:
'''
Performs an inplace optimizer update with one or two optimizer states.
Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.
Parameters
----------
optimizer_name : str
The name of the optimizer: {adam}.
g : torch.Tensor
Gradient tensor.
p : torch.Tensor
Parameter tensor.
state1 : torch.Tensor
Optimizer state 1.
beta1 : float
Optimizer beta1.
eps : float
Optimizer epsilon.
weight_decay : float
Weight decay.
step : int
Current optimizer step.
lr : float
The learning rate.
state2 : torch.Tensor
Optimizer state 2.
beta2 : float
Optimizer beta2.
gnorm_scale : float
The factor to rescale the gradient to the max clip value.
+ unorm_vec : torch.Tensor
+ The tensor for the update norm.
+ max_unorm : float
+ The maximum update norm relative to the weight norm.
+ skip_zeros : bool
+ Whether to skip zero-valued gradients or not (default: False).
'''
param_norm = 0.0
if max_unorm > 0.0:
param_norm = torch.norm(p.data.float())
if optimizer_name not in str2optimizer32bit:
raise NotImplementError(f'Optimizer not implemented: {optimizer_name}. Choices: {",".join(str2optimizer32bit.keys())}')
if g.dtype == torch.float32</s>
===========changed ref 1===========
<s>: Tensor,
beta1: float, eps: float, step: int, lr: float,
state2: Tensor=None, beta2: float=0.0,
weight_decay: float=0.0, gnorm_scale: float=1.0,
+ unorm_vec: Tensor=None, max_unorm: float=0.0, skip_zeros=False) -> None:
- unorm_vec: Tensor=None, max_unorm: float=0.0) -> None:
# offset: 1
<s> Choices: {",".join(str2optimizer32bit.keys())}')
if g.dtype == torch.float32 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][0](get_ptr(g), get_ptr(p), get_ptr(state1), get_ptr(state2), get_ptr(unorm_vec), ct.c_float(max_unorm),
ct.c_float(param_norm), ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps), ct.c_float(weight_decay),
+ ct.c_int32(step), ct.c_float(lr), ct.c_float(gnorm_scale), ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
- ct.c_int32(step), ct.c_float(lr), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
elif g.dtype == torch.float16 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][1](get_ptr(g), get_ptr(p), get_ptr(state1), get_ptr(state2), get_ptr(unorm_vec), ct.c_float(max_unorm),
ct.c_float(param_norm), ct.c_float(beta1),</s>
===========changed ref 2===========
<s>: Tensor,
beta1: float, eps: float, step: int, lr: float,
state2: Tensor=None, beta2: float=0.0,
weight_decay: float=0.0, gnorm_scale: float=1.0,
+ unorm_vec: Tensor=None, max_unorm: float=0.0, skip_zeros=False) -> None:
- unorm_vec: Tensor=None, max_unorm: float=0.0) -> None:
# offset: 2
<s>c_float(beta2), ct.c_float(eps), ct.c_float(weight_decay),
+ ct.c_int32(step), ct.c_float(lr), ct.c_float(gnorm_scale), ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
- ct.c_int32(step), ct.c_float(lr), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
else:
raise ValueError(f'Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}')
|
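`optimizer_update_8bit_blockwise` (like the 32-bit variant earlier) is a two-level dispatch: the optimizer name selects a pair of kernels, and the gradient dtype selects the fp32 or fp16 member of that pair. Below is a compact sketch of the selection logic; the callables in `kernel_table` are placeholders, not the real C symbols.

# --- sketch of the dispatch pattern; the entries stand in for the real C kernels ---
import torch

kernel_table = {
    'adam': (lambda: print('fp32 adam kernel'), lambda: print('fp16 adam kernel')),
}

def pick_kernel(optimizer_name: str, g: torch.Tensor):
    if optimizer_name not in kernel_table:
        raise NotImplementedError(f'Optimizer not implemented: {optimizer_name}')
    if g.dtype == torch.float32:
        return kernel_table[optimizer_name][0]
    elif g.dtype == torch.float16:
        return kernel_table[optimizer_name][1]
    raise ValueError(f'Unsupported gradient dtype: {g.dtype}')

pick_kernel('adam', torch.zeros(4, dtype=torch.float16))()    # -> fp16 adam kernel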
bitsandbytes.optim.optimizer/Optimizer8bit.get_config
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
bb34fd50a1fec74e62beb6e23d51f0142c7d0ab6
|
Initial plumbing for skip_zeros.
|
<10>:<add> config['skip_zeros'] = self.args.skip_zeros
|
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def get_config(self, gindex, pindex, group):
<0> config = {}
<1> config['betas'] = group['betas']
<2> config['eps'] = group['eps']
<3> config['weight_decay'] = group['weight_decay']
<4> config['lr'] = group['lr']
<5> config['optim_bits'] = self.args.optim_bits
<6> config['min_8bit_size'] = self.args.min_8bit_size
<7> config['percentile_clipping'] = self.args.percentile_clipping
<8> config['block_wise'] = self.args.block_wise
<9> config['max_unorm'] = self.args.max_unorm
<10>
<11> if (gindex, pindex) in self.mng.index2config:
<12> config.update(self.mng.index2config[(gindex, pindex)])
<13> return config
<14>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.optimizer.Optimizer8bit.__init__
self.mng = GlobalOptimManager.get_instance()
===========changed ref 0===========
<s>,
beta1: float, beta2: float, eps: float,
step: int, lr: float, qmap1: Tensor, qmap2: Tensor,
+ absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0,
- absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0) -> None:
+ skip_zeros=False) -> None:
if g.dtype == torch.float32 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][0](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
ct.c_int32(step), ct.c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
+ get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale),
- get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
+ ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][1](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
ct.c_int32(step), ct.c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
+ get_ptr(abs</s>
===========changed ref 1===========
<s>: float, beta2: float, eps: float,
step: int, lr: float, qmap1: Tensor, qmap2: Tensor,
+ absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0,
- absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0) -> None:
+ skip_zeros=False) -> None:
# offset: 1
<s>c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
+ get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale),
- get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
+ ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
else:
raise ValueError(f'Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}')
===========changed ref 2===========
<s>: Tensor, state1: Tensor,
beta1: float, eps: float, step: int, lr: float,
state2: Tensor=None, beta2: float=0.0,
weight_decay: float=0.0, gnorm_scale: float=1.0,
+ unorm_vec: Tensor=None, max_unorm: float=0.0, skip_zeros=False) -> None:
- unorm_vec: Tensor=None, max_unorm: float=0.0) -> None:
'''
Performs an inplace optimizer update with one or two optimizer states.
Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.
Parameters
----------
optimizer_name : str
The name of the optimizer: {adam}.
g : torch.Tensor
Gradient tensor.
p : torch.Tensor
Parameter tensor.
state1 : torch.Tensor
Optimizer state 1.
beta1 : float
Optimizer beta1.
eps : float
Optimizer epsilon.
weight_decay : float
Weight decay.
step : int
Current optimizer step.
lr : float
The learning rate.
state2 : torch.Tensor
Optimizer state 2.
beta2 : float
Optimizer beta2.
gnorm_scale : float
The factor to rescale the gradient to the max clip value.
+ unorm_vec : torch.Tensor
+ The tensor for the update norm.
+ max_unorm : float
+ The maximum update norm relative to the weight norm.
+ skip_zeros : bool
+ Whether to skip zero-valued gradients or not (default: False).
'''
param_norm = 0.0
if max_unorm > 0.0:
param_norm = torch.norm(p.data.float())
if optimizer_name not in str2optimizer32bit:
raise NotImplementError(f'Optimizer not implemented: {optimizer_name}. Choices: {",".join(str2optimizer32bit.keys())}')
if g.dtype == torch.float32</s>
===========changed ref 3===========
<s>: Tensor,
beta1: float, eps: float, step: int, lr: float,
state2: Tensor=None, beta2: float=0.0,
weight_decay: float=0.0, gnorm_scale: float=1.0,
+ unorm_vec: Tensor=None, max_unorm: float=0.0, skip_zeros=False) -> None:
- unorm_vec: Tensor=None, max_unorm: float=0.0) -> None:
# offset: 1
<s> Choices: {",".join(str2optimizer32bit.keys())}')
if g.dtype == torch.float32 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][0](get_ptr(g), get_ptr(p), get_ptr(state1), get_ptr(state2), get_ptr(unorm_vec), ct.c_float(max_unorm),
ct.c_float(param_norm), ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps), ct.c_float(weight_decay),
+ ct.c_int32(step), ct.c_float(lr), ct.c_float(gnorm_scale), ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
- ct.c_int32(step), ct.c_float(lr), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
elif g.dtype == torch.float16 and state1.dtype == torch.float32:
str2optimizer32bit[optimizer_name][1](get_ptr(g), get_ptr(p), get_ptr(state1), get_ptr(state2), get_ptr(unorm_vec), ct.c_float(max_unorm),
ct.c_float(param_norm), ct.c_float(beta1),</s>
|
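Because `get_config` above now reads `self.args.skip_zeros` unconditionally, every optimizer that builds its own args object must define that key, which is why the `Optimizer2State` and `Optimizer1State` constructors below add `args['skip_zeros'] = skip_zeros`. A tiny sketch of the failure mode follows, using a stand-in for `MockArgs` (the real class may differ in detail).

# --- stand-in for MockArgs: plain attribute access, so a missing key raises AttributeError ---
class MockArgsLike:
    def __init__(self, initial_data):
        for key, value in initial_data.items():
            setattr(self, key, value)

args = MockArgsLike({'optim_bits': 32})            # 'skip_zeros' deliberately not set
try:
    _ = args.skip_zeros
except AttributeError:
    print("get_config would fail here without the new args['skip_zeros'] assignment")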
bitsandbytes.optim.optimizer/Optimizer2State.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
bb34fd50a1fec74e62beb6e23d51f0142c7d0ab6
|
Initial plumbing for skip_zeros.
|
<24>:<add> args['skip_zeros'] = skip_zeros
|
<s> lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0.0, optim_bits=32, args=None,
+ min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0,
- min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0):
+ skip_zeros=False):
<0> if not 0.0 <= lr:
<1> raise ValueError("Invalid learning rate: {}".format(lr))
<2> if not 0.0 <= eps:
<3> raise ValueError("Invalid epsilon value: {}".format(eps))
<4> if isinstance(betas, str):
<5> betas = eval(betas)
<6> print(betas, 'parsed')
<7> for i in range(len(betas)):
<8> if not 0.0 <= betas[i] < 1.0:
<9> raise ValueError(f"Invalid beta parameter at index {i}: {betas[i]}")
<10> if not 0.0 <= weight_decay:
<11> raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
<12> defaults = dict(lr=lr, betas=betas, eps=eps,
<13> weight_decay=weight_decay)
<14> super(Optimizer2State, self).__init__(params, defaults, optim_bits)
<15>
<16> if args is None:
<17> args = {}
<18> args['optim_bits'] = optim_bits
<19> args['percentile_clipping'] = 100
<20> args['min_8bit_size'] = min_8bit_size
<21> args['percentile_clipping'] = percentile_clipping
<22> args['block_wise'] = block_wise
<23> args['max_unorm'] = max_unorm
<24>
<25> self.args = MockArgs(args)
<26> else:
<27> self.args = args
<28>
<29> self.optimizer_name = optimizer_name
<30>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.optimizer
MockArgs(initial_data)
Optimizer2State(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0)
at: bitsandbytes.optim.optimizer.Optimizer8bit
__init__(params, defaults, optim_bits=32)
__init__(self, params, defaults, optim_bits=32)
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def get_config(self, gindex, pindex, group):
config = {}
config['betas'] = group['betas']
config['eps'] = group['eps']
config['weight_decay'] = group['weight_decay']
config['lr'] = group['lr']
config['optim_bits'] = self.args.optim_bits
config['min_8bit_size'] = self.args.min_8bit_size
config['percentile_clipping'] = self.args.percentile_clipping
config['block_wise'] = self.args.block_wise
config['max_unorm'] = self.args.max_unorm
+ config['skip_zeros'] = self.args.skip_zeros
if (gindex, pindex) in self.mng.index2config:
config.update(self.mng.index2config[(gindex, pindex)])
return config
===========changed ref 1===========
<s>,
beta1: float, beta2: float, eps: float,
step: int, lr: float, qmap1: Tensor, qmap2: Tensor,
+ absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0,
- absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0) -> None:
+ skip_zeros=False) -> None:
if g.dtype == torch.float32 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][0](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
ct.c_int32(step), ct.c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
+ get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale),
- get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
+ ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][1](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
ct.c_int32(step), ct.c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
+ get_ptr(abs</s>
===========changed ref 2===========
<s>: float, beta2: float, eps: float,
step: int, lr: float, qmap1: Tensor, qmap2: Tensor,
+ absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0,
- absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0) -> None:
+ skip_zeros=False) -> None:
# offset: 1
<s>c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
+ get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale),
- get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
+ ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
else:
raise ValueError(f'Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}')
===========changed ref 3===========
<s>: Tensor, state1: Tensor,
beta1: float, eps: float, step: int, lr: float,
state2: Tensor=None, beta2: float=0.0,
weight_decay: float=0.0, gnorm_scale: float=1.0,
+ unorm_vec: Tensor=None, max_unorm: float=0.0, skip_zeros=False) -> None:
- unorm_vec: Tensor=None, max_unorm: float=0.0) -> None:
'''
Performs an inplace optimizer update with one or two optimizer states.
Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.
Parameters
----------
optimizer_name : str
The name of the optimizer: {adam}.
g : torch.Tensor
Gradient tensor.
p : torch.Tensor
Parameter tensor.
state1 : torch.Tensor
Optimizer state 1.
beta1 : float
Optimizer beta1.
eps : float
Optimizer epsilon.
weight_decay : float
Weight decay.
step : int
Current optimizer step.
lr : float
The learning rate.
state2 : torch.Tensor
Optimizer state 2.
beta2 : float
Optimizer beta2.
gnorm_scale : float
The factor to rescale the gradient to the max clip value.
+ unorm_vec : torch.Tensor
+ The tensor for the update norm.
+ max_unorm : float
+ The maximum update norm relative to the weight norm.
+ skip_zeros : bool
+ Whether to skip zero-valued gradients or not (default: False).
'''
param_norm = 0.0
if max_unorm > 0.0:
param_norm = torch.norm(p.data.float())
if optimizer_name not in str2optimizer32bit:
raise NotImplementError(f'Optimizer not implemented: {optimizer_name}. Choices: {",".join(str2optimizer32bit.keys())}')
if g.dtype == torch.float32</s>
|
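The string branch above (`betas = eval(betas)` followed by the leftover debug `print(betas, 'parsed')`) accepts betas passed as text such as "(0.9, 0.995)". Below is a hedged sketch of an equivalent but safer parse with `ast.literal_eval`; this is an editorial alternative, not what the recorded commit does.

# --- safer equivalent of the eval() branch; illustrative alternative only ---
import ast

def parse_betas(betas):
    if isinstance(betas, str):
        betas = ast.literal_eval(betas)            # "(0.9, 0.995)" -> (0.9, 0.995); rejects arbitrary code
    for i, beta in enumerate(betas):
        if not 0.0 <= beta < 1.0:
            raise ValueError(f"Invalid beta parameter at index {i}: {beta}")
    return tuple(betas)

assert parse_betas("(0.9, 0.995)") == (0.9, 0.995)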
bitsandbytes.optim.optimizer/Optimizer1State.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
bb34fd50a1fec74e62beb6e23d51f0142c7d0ab6
|
Initial plumbing for skip_zeros.
|
<21>:<add> args['skip_zeros'] = skip_zeros
|
<s> lr=1e-3, betas=(0.9, 0.0), eps=1e-8,
weight_decay=0.0, optim_bits=32, args=None,
+ min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0,
- min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0):
+ skip_zeros=False):
<0> if not 0.0 <= lr:
<1> raise ValueError("Invalid learning rate: {}".format(lr))
<2> if not 0.0 <= eps:
<3> raise ValueError("Invalid epsilon value: {}".format(eps))
<4> for i in range(len(betas)):
<5> if not 0.0 <= betas[i] < 1.0:
<6> raise ValueError(f"Invalid beta parameter at index {i}: {betas[i]}")
<7> if not 0.0 <= weight_decay:
<8> raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
<9> defaults = dict(lr=lr, betas=betas, eps=eps,
<10> weight_decay=weight_decay)
<11> super(Optimizer1State, self).__init__(params, defaults, optim_bits)
<12>
<13> if args is None:
<14> args = {}
<15> args['optim_bits'] = optim_bits
<16> args['percentile_clipping'] = 100
<17> args['min_8bit_size'] = min_8bit_size
<18> args['percentile_clipping'] = percentile_clipping
<19> args['block_wise'] = block_wise
<20> args['max_unorm'] = max_unorm
<21>
<22> self.args = MockArgs(args)
<23> else:
<24> self.args = args
<25>
<26> self.optimizer_name = optimizer_name
<27>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.optimizer
MockArgs(initial_data)
Optimizer1State(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0)
at: bitsandbytes.optim.optimizer.Optimizer8bit
__init__(params, defaults, optim_bits=32)
__init__(self, params, defaults, optim_bits=32)
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def get_config(self, gindex, pindex, group):
config = {}
config['betas'] = group['betas']
config['eps'] = group['eps']
config['weight_decay'] = group['weight_decay']
config['lr'] = group['lr']
config['optim_bits'] = self.args.optim_bits
config['min_8bit_size'] = self.args.min_8bit_size
config['percentile_clipping'] = self.args.percentile_clipping
config['block_wise'] = self.args.block_wise
config['max_unorm'] = self.args.max_unorm
+ config['skip_zeros'] = self.args.skip_zeros
if (gindex, pindex) in self.mng.index2config:
config.update(self.mng.index2config[(gindex, pindex)])
return config
===========changed ref 1===========
<s> lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0.0, optim_bits=32, args=None,
+ min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0,
- min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0):
+ skip_zeros=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if isinstance(betas, str):
betas = eval(betas)
print(betas, 'parsed')
for i in range(len(betas)):
if not 0.0 <= betas[i] < 1.0:
raise ValueError(f"Invalid beta parameter at index {i}: {betas[i]}")
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
super(Optimizer2State, self).__init__(params, defaults, optim_bits)
if args is None:
args = {}
args['optim_bits'] = optim_bits
args['percentile_clipping'] = 100
args['min_8bit_size'] = min_8bit_size
args['percentile_clipping'] = percentile_clipping
args['block_wise'] = block_wise
args['max_unorm'] = max_unorm
+ args['skip_zeros'] = skip_zeros
self.args = MockArgs(args)
else:
self.args = args
self.optimizer_name = optimizer_name
===========changed ref 2===========
<s>,
beta1: float, beta2: float, eps: float,
step: int, lr: float, qmap1: Tensor, qmap2: Tensor,
+ absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0,
- absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0) -> None:
+ skip_zeros=False) -> None:
if g.dtype == torch.float32 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][0](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
ct.c_int32(step), ct.c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
+ get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale),
- get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
+ ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][1](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
ct.c_int32(step), ct.c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
+ get_ptr(abs</s>
===========changed ref 3===========
<s>: float, beta2: float, eps: float,
step: int, lr: float, qmap1: Tensor, qmap2: Tensor,
+ absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0,
- absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0) -> None:
+ skip_zeros=False) -> None:
# offset: 1
<s>c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
+ get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale),
- get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
+ ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
else:
raise ValueError(f'Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}')
|
bitsandbytes.optim.optimizer/Optimizer1State.update_step
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
bb34fd50a1fec74e62beb6e23d51f0142c7d0ab6
|
Initial plumbing for skip_zeros.
|
<16>:<add> state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max_unorm=config['max_unorm'],
<del> state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max_unorm=config['max_unorm'])
<17>:<add> skip_zeros=False)
|
# module: bitsandbytes.optim.optimizer
class Optimizer1State(Optimizer8bit):
@torch.no_grad()
def update_step(self, group, p, gindex, pindex):
<0> state = self.state[p]
<1> grad = p.grad
<2>
<3> config = self.get_config(gindex, pindex, group)
<4>
<5> state['step'] += 1
<6> step = state['step']
<7>
<8> if config['percentile_clipping'] < 100:
<9> current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(grad, state['gnorm_vec'], step, config['percentile_clipping'])
<10> else:
<11> gnorm_scale = 1.0
<12>
<13> if state['state1'].dtype == torch.float:
<14> F.optimizer_update_32bit(self.optimizer_name, grad, p, state['state1'], config['betas'][0], config['eps'], step, config['lr'],
<15> None, 0.0, config['weight_decay'], gnorm_scale,
<16> state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max_unorm=config['max_unorm'])
<17>
<18> elif state['state1'].dtype == torch.uint8 and not config['block_wise']:
<19> F.optimizer_update_8bit(self.optimizer_name, grad, p, state['state1'], None, config['betas'][0], config['betas'][1],
<20> config['eps'], step, config['lr'], state['qmap1'], None, state['max1'], None, state['new_max1'], None,
<21> config['weight_decay'], gnorm_scale,
<22> state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max_unorm=config['max_unorm'])
<23>
<24> state['max1'], state['new_max1'] = state['new_max1'], state['max1']
<25> elif state['state1'].dtype == torch.uint8 and config['block</s>
|
===========below chunk 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer1State(Optimizer8bit):
@torch.no_grad()
def update_step(self, group, p, gindex, pindex):
# offset: 1
F.optimizer_update_8bit_blockwise(self.optimizer_name, grad, p, state['state1'], None, config['betas'][0], config['betas'][1],
config['eps'], step, config['lr'],
state['qmap1'], None, state['absmax1'], None,
config['weight_decay'], gnorm_scale=gnorm_scale)
===========unchanged ref 0===========
at: bitsandbytes.functional
optimizer_update_32bit(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, beta1: float, eps: float, step: int, lr: float, state2: Tensor=None, beta2: float=0.0, weight_decay: float=0.0, gnorm_scale: float=1.0, unorm_vec: Tensor=None, max_unorm: float=0.0, skip_zeros=False) -> None
optimizer_update_8bit(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, state2: Tensor, beta1: float, beta2: float, eps: float, step: int, lr: float, qmap1: Tensor, qmap2: Tensor, max1: Tensor, max2: Tensor, new_max1: Tensor, new_max2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0, unorm_vec: Tensor=None, max_unorm: float=0.0) -> None
optimizer_update_8bit_blockwise(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, state2: Tensor, beta1: float, beta2: float, eps: float, step: int, lr: float, qmap1: Tensor, qmap2: Tensor, absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0, skip_zeros=False) -> None
percentile_clipping(grad: Tensor, gnorm_vec: Tensor, step: int, percentile: int=5)
at: bitsandbytes.optim.optimizer.Optimizer1State.__init__
self.optimizer_name = optimizer_name
at: bitsandbytes.optim.optimizer.Optimizer8bit
get_config(gindex, pindex, group)
get_config(self, gindex, pindex, group)
update_step(self, group, p, gindex, pindex)
at: torch._C
float: dtype = ...
uint8: dtype = ...
===========unchanged ref 1===========
at: torch.autograd.grad_mode
no_grad()
at: torch.optim.optimizer.Optimizer.__init__
self.state: DefaultDict[torch.Tensor, Any] = defaultdict(dict)
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def get_config(self, gindex, pindex, group):
config = {}
config['betas'] = group['betas']
config['eps'] = group['eps']
config['weight_decay'] = group['weight_decay']
config['lr'] = group['lr']
config['optim_bits'] = self.args.optim_bits
config['min_8bit_size'] = self.args.min_8bit_size
config['percentile_clipping'] = self.args.percentile_clipping
config['block_wise'] = self.args.block_wise
config['max_unorm'] = self.args.max_unorm
+ config['skip_zeros'] = self.args.skip_zeros
if (gindex, pindex) in self.mng.index2config:
config.update(self.mng.index2config[(gindex, pindex)])
return config
===========changed ref 1===========
<s>,
beta1: float, beta2: float, eps: float,
step: int, lr: float, qmap1: Tensor, qmap2: Tensor,
+ absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0,
- absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0) -> None:
+ skip_zeros=False) -> None:
if g.dtype == torch.float32 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][0](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
ct.c_int32(step), ct.c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
+ get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale),
- get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
+ ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
str2optimizer8bit_blockwise[optimizer_name][1](get_ptr(p), get_ptr(g), get_ptr(state1), get_ptr(state2),
ct.c_float(beta1), ct.c_float(beta2), ct.c_float(eps),
ct.c_int32(step), ct.c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
+ get_ptr(abs</s>
===========changed ref 2===========
<s>: float, beta2: float, eps: float,
step: int, lr: float, qmap1: Tensor, qmap2: Tensor,
+ absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0,
- absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0) -> None:
+ skip_zeros=False) -> None:
# offset: 1
<s>c_float(lr), get_ptr(qmap1), get_ptr(qmap2),
+ get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale),
- get_ptr(absmax1), get_ptr(absmax2), ct.c_float(weight_decay), ct.c_float(gnorm_scale), ct.c_int32(g.numel()))
+ ct.c_bool(skip_zeros), ct.c_int32(g.numel()))
else:
raise ValueError(f'Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}')
|
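`update_step` above derives `gnorm_scale` from `F.percentile_clipping(grad, state['gnorm_vec'], step, config['percentile_clipping'])`, which is only active when the configured percentile is below 100. The general idea, clipping the current gradient norm against a percentile of recently seen norms, is sketched below in plain PyTorch; the library's exact rolling-buffer bookkeeping may differ.

# --- illustrative percentile clipping in plain PyTorch; bitsandbytes' version may differ in detail ---
import torch

def percentile_clipping_sketch(grad, gnorm_history, percentile=5):
    """Scale the gradient update down when its norm exceeds the given percentile of recent norms."""
    current_gnorm = torch.norm(grad.float())
    gnorm_history.append(current_gnorm.item())                     # rolling history kept by the caller
    clip_value = torch.quantile(torch.tensor(gnorm_history), percentile / 100.0)
    gnorm_scale = 1.0 if current_gnorm <= clip_value else float(clip_value / current_gnorm)
    return current_gnorm, float(clip_value), gnorm_scale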
tests.test_optim/test_global_config
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
bb34fd50a1fec74e62beb6e23d51f0142c7d0ab6
|
Initial plumbing for skip_zeros.
|
<11>:<add> bnb.optim.GlobalOptimManager.get_instance().override_config(p2, 'skip_zeros', True)
<25>:<add> original_p2 = p2[mask].clone()
<add>
|
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype", values, ids=names)
def test_global_config(dim1, dim2, gtype):
<0> if dim1 == 1 and dim2 == 1: return
<1> p1 = torch.randn(dim1,dim2, device='cpu', dtype=gtype)*0.1
<2> p2 = torch.randn(dim1,dim2, device='cpu', dtype=gtype)*0.1
<3> p3 = torch.randn(dim1,dim2, device='cpu', dtype=gtype)*0.1
<4> mask = torch.rand_like(p2) < 0.1
<5> beta1 = 0.9
<6> beta2 = 0.999
<7> lr = 0.001
<8> eps = 1e-8
<9>
<10> bnb.optim.GlobalOptimManager.get_instance().initialize()
<11> bnb.optim.GlobalOptimManager.get_instance().override_config(p3, 'optim_bits', 8)
<12>
<13> bnb.optim.GlobalOptimManager.get_instance().register_parameters([p1, p2, p3])
<14> p1 = p1.cuda()
<15> p2 = p2.cuda()
<16> p3 = p3.cuda()
<17>
<18> adam2 = bnb.optim.Adam([p1, p2, p3], lr, (beta1, beta2), eps)
<19>
<20> if gtype == torch.float32:
<21> atol, rtol = 1e-6, 1e-5
<22> else:
<23> atol, rtol = 1e-4, 1e-3
<24>
<25> for i in range(50):
<26> g1 = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.1 + 0.001
<27> g2 = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.1 + 0.001
<28> g3 = torch.randn(dim1,dim2, device</s>
|
===========below chunk 0===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype", values, ids=names)
def test_global_config(dim1, dim2, gtype):
# offset: 1
p1.grad = g1
p2.grad = g2
p3.grad = g3
adam2.step()
assert adam2.state[p3]['state1'].dtype == torch.uint8
assert adam2.state[p3]['state2'].dtype == torch.uint8
===========unchanged ref 0===========
at: _pytest.mark.structures
MARK_GEN = MarkGenerator(_ispytest=True)
at: _pytest.mark.structures.MarkGenerator
skip: _SkipMarkDecorator
skipif: _SkipifMarkDecorator
xfail: _XfailMarkDecorator
parametrize: _ParametrizeMarkDecorator
usefixtures: _UsefixturesMarkDecorator
filterwarnings: _FilterwarningsMarkDecorator
at: bitsandbytes.optim.adam
Adam(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True)
at: bitsandbytes.optim.optimizer
GlobalOptimManager()
at: bitsandbytes.optim.optimizer.GlobalOptimManager
_instance = None
get_instance()
at: tests.test_optim
values = list(product(dim1,dim2, gtype))
names = ['dim1_{0}_dim2_{1}_gtype_{2}'.format(*vals) for vals in values]
at: torch._C
float32: dtype = ...
at: torch._C._VariableFunctions
rand_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
===========unchanged ref 1===========
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor
randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device:</s>
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def get_config(self, gindex, pindex, group):
config = {}
config['betas'] = group['betas']
config['eps'] = group['eps']
config['weight_decay'] = group['weight_decay']
config['lr'] = group['lr']
config['optim_bits'] = self.args.optim_bits
config['min_8bit_size'] = self.args.min_8bit_size
config['percentile_clipping'] = self.args.percentile_clipping
config['block_wise'] = self.args.block_wise
config['max_unorm'] = self.args.max_unorm
+ config['skip_zeros'] = self.args.skip_zeros
if (gindex, pindex) in self.mng.index2config:
config.update(self.mng.index2config[(gindex, pindex)])
return config
===========changed ref 1===========
<s> lr=1e-3, betas=(0.9, 0.0), eps=1e-8,
weight_decay=0.0, optim_bits=32, args=None,
+ min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0,
- min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0):
+ skip_zeros=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
for i in range(len(betas)):
if not 0.0 <= betas[i] < 1.0:
raise ValueError(f"Invalid beta parameter at index {i}: {betas[i]}")
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
super(Optimizer1State, self).__init__(params, defaults, optim_bits)
if args is None:
args = {}
args['optim_bits'] = optim_bits
args['percentile_clipping'] = 100
args['min_8bit_size'] = min_8bit_size
args['percentile_clipping'] = percentile_clipping
args['block_wise'] = block_wise
args['max_unorm'] = max_unorm
+ args['skip_zeros'] = skip_zeros
self.args = MockArgs(args)
else:
self.args = args
self.optimizer_name = optimizer_name
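A condensed sketch of the per-parameter override flow exercised by this test (assumes a CUDA device; parameter names are illustrative): overrides are registered on the CPU tensors before they are moved to the GPU and handed to the optimizer, forcing skip_zeros for one parameter and 8-bit state for another.

    import torch
    import bitsandbytes as bnb

    p1, p2, p3 = (torch.randn(64, 64) * 0.1 for _ in range(3))
    mgr = bnb.optim.GlobalOptimManager.get_instance()
    mgr.initialize()
    mgr.override_config(p2, 'skip_zeros', True)   # leave zero-gradient entries untouched
    mgr.override_config(p3, 'optim_bits', 8)      # 8-bit optimizer state for p3 only
    mgr.register_parameters([p1, p2, p3])

    p1, p2, p3 = p1.cuda(), p2.cuda(), p3.cuda()
    opt = bnb.optim.Adam([p1, p2, p3], lr=1e-3, betas=(0.9, 0.999), eps=1e-8)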
|
bitsandbytes.optim.optimizer/Optimizer2State.update_step
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
a6eae2e7f2bf03f268fcb6b055201ff6827684c4
|
Added skip_zeros; tests are passing.
|
<16>:<add> state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max_unorm=config['max_unorm'], skip_zeros=config['skip_zeros'])
<del> state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max_unorm=config['max_unorm'])
|
# module: bitsandbytes.optim.optimizer
class Optimizer2State(Optimizer8bit):
@torch.no_grad()
def update_step(self, group, p, gindex, pindex):
<0> state = self.state[p]
<1> grad = p.grad
<2>
<3> config = self.get_config(gindex, pindex, group)
<4>
<5> state['step'] += 1
<6> step = state['step']
<7>
<8> if config['percentile_clipping'] < 100:
<9> current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(grad, state['gnorm_vec'], step, config['percentile_clipping'])
<10> else:
<11> gnorm_scale = 1.0
<12>
<13> if state['state1'].dtype == torch.float:
<14> F.optimizer_update_32bit(self.optimizer_name, grad, p, state['state1'], config['betas'][0], config['eps'], step, config['lr'],
<15> state['state2'], config['betas'][1], config['weight_decay'], gnorm_scale,
<16> state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max_unorm=config['max_unorm'])
<17>
<18> elif state['state1'].dtype == torch.uint8 and not config['block_wise']:
<19> F.optimizer_update_8bit(self.optimizer_name, grad, p, state['state1'], state['state2'], config['betas'][0], config['betas'][1],
<20> config['eps'], step, config['lr'],
<21> state['qmap1'], state['qmap2'], state['max1'], state['max2'], state['new_max1'], state['new_max2'],
<22> config['weight_decay'], gnorm_scale=gnorm_scale,
<23> unorm_vec=state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max_unorm=config['max_unorm'])
<24>
<25> # swap maxes
<26> state</s>
|
===========below chunk 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer2State(Optimizer8bit):
@torch.no_grad()
def update_step(self, group, p, gindex, pindex):
# offset: 1
state['max2'], state['new_max2'] = state['new_max2'], state['max2']
elif state['state1'].dtype == torch.uint8 and config['block_wise']:
F.optimizer_update_8bit_blockwise(self.optimizer_name, grad, p, state['state1'], state['state2'], config['betas'][0], config['betas'][1],
config['eps'], step, config['lr'],
state['qmap1'], state['qmap2'], state['absmax1'], state['absmax2'],
config['weight_decay'], gnorm_scale=gnorm_scale)
===========unchanged ref 0===========
at: bitsandbytes.functional
optimizer_update_32bit(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, beta1: float, eps: float, step: int, lr: float, state2: Tensor=None, beta2: float=0.0, weight_decay: float=0.0, gnorm_scale: float=1.0, unorm_vec: Tensor=None, max_unorm: float=0.0, skip_zeros=False) -> None
optimizer_update_8bit(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, state2: Tensor, beta1: float, beta2: float, eps: float, step: int, lr: float, qmap1: Tensor, qmap2: Tensor, max1: Tensor, max2: Tensor, new_max1: Tensor, new_max2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0, unorm_vec: Tensor=None, max_unorm: float=0.0) -> None
optimizer_update_8bit_blockwise(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, state2: Tensor, beta1: float, beta2: float, eps: float, step: int, lr: float, qmap1: Tensor, qmap2: Tensor, absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0, skip_zeros=False) -> None
percentile_clipping(grad: Tensor, gnorm_vec: Tensor, step: int, percentile: int=5)
at: bitsandbytes.optim.optimizer.Optimizer2State.__init__
self.optimizer_name = optimizer_name
at: bitsandbytes.optim.optimizer.Optimizer8bit
get_config(gindex, pindex, group)
update_step(self, group, p, gindex, pindex)
at: torch._C
float: dtype = ...
uint8: dtype = ...
===========unchanged ref 1===========
at: torch.autograd.grad_mode
no_grad()
at: torch.optim.optimizer.Optimizer.__init__
self.state: DefaultDict[torch.Tensor, Any] = defaultdict(dict)
|
bitsandbytes.optim.optimizer/Optimizer1State.update_step
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
a6eae2e7f2bf03f268fcb6b055201ff6827684c4
|
Added skip_zeros; tests are passing.
|
<17>:<add> skip_zeros=config['skip_zeros'])
<del> skip_zeros=False)
|
# module: bitsandbytes.optim.optimizer
class Optimizer1State(Optimizer8bit):
@torch.no_grad()
def update_step(self, group, p, gindex, pindex):
<0> state = self.state[p]
<1> grad = p.grad
<2>
<3> config = self.get_config(gindex, pindex, group)
<4>
<5> state['step'] += 1
<6> step = state['step']
<7>
<8> if config['percentile_clipping'] < 100:
<9> current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(grad, state['gnorm_vec'], step, config['percentile_clipping'])
<10> else:
<11> gnorm_scale = 1.0
<12>
<13> if state['state1'].dtype == torch.float:
<14> F.optimizer_update_32bit(self.optimizer_name, grad, p, state['state1'], config['betas'][0], config['eps'], step, config['lr'],
<15> None, 0.0, config['weight_decay'], gnorm_scale,
<16> state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max_unorm=config['max_unorm'],
<17> skip_zeros=False)
<18>
<19> elif state['state1'].dtype == torch.uint8 and not config['block_wise']:
<20> F.optimizer_update_8bit(self.optimizer_name, grad, p, state['state1'], None, config['betas'][0], config['betas'][1],
<21> config['eps'], step, config['lr'], state['qmap1'], None, state['max1'], None, state['new_max1'], None,
<22> config['weight_decay'], gnorm_scale,
<23> state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max_unorm=config['max_unorm'])
<24>
<25> state['max1'], state['new_max1'] = state['new_max1'], state['max1']
<26> elif state['state1'].dtype</s>
|
===========below chunk 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer1State(Optimizer8bit):
@torch.no_grad()
def update_step(self, group, p, gindex, pindex):
# offset: 1
F.optimizer_update_8bit_blockwise(self.optimizer_name, grad, p, state['state1'], None, config['betas'][0], config['betas'][1],
config['eps'], step, config['lr'],
state['qmap1'], None, state['absmax1'], None,
config['weight_decay'], gnorm_scale=gnorm_scale, skip_zeros=False)
===========unchanged ref 0===========
at: bitsandbytes.functional
optimizer_update_32bit(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, beta1: float, eps: float, step: int, lr: float, state2: Tensor=None, beta2: float=0.0, weight_decay: float=0.0, gnorm_scale: float=1.0, unorm_vec: Tensor=None, max_unorm: float=0.0, skip_zeros=False) -> None
optimizer_update_8bit(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, state2: Tensor, beta1: float, beta2: float, eps: float, step: int, lr: float, qmap1: Tensor, qmap2: Tensor, max1: Tensor, max2: Tensor, new_max1: Tensor, new_max2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0, unorm_vec: Tensor=None, max_unorm: float=0.0) -> None
optimizer_update_8bit_blockwise(optimizer_name: str, g: Tensor, p: Tensor, state1: Tensor, state2: Tensor, beta1: float, beta2: float, eps: float, step: int, lr: float, qmap1: Tensor, qmap2: Tensor, absmax1: Tensor, absmax2: Tensor, weight_decay: float=0.0, gnorm_scale: float=1.0, skip_zeros=False) -> None
percentile_clipping(grad: Tensor, gnorm_vec: Tensor, step: int, percentile: int=5)
at: bitsandbytes.optim.optimizer.Optimizer1State.__init__
self.optimizer_name = optimizer_name
at: bitsandbytes.optim.optimizer.Optimizer8bit
get_config(gindex, pindex, group)
update_step(self, group, p, gindex, pindex)
at: torch._C
float: dtype = ...
uint8: dtype = ...
===========unchanged ref 1===========
at: torch.autograd.grad_mode
no_grad()
at: torch.optim.optimizer.Optimizer.__init__
self.state: DefaultDict[torch.Tensor, Any] = defaultdict(dict)
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer2State(Optimizer8bit):
@torch.no_grad()
def update_step(self, group, p, gindex, pindex):
state = self.state[p]
grad = p.grad
config = self.get_config(gindex, pindex, group)
state['step'] += 1
step = state['step']
if config['percentile_clipping'] < 100:
current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(grad, state['gnorm_vec'], step, config['percentile_clipping'])
else:
gnorm_scale = 1.0
if state['state1'].dtype == torch.float:
F.optimizer_update_32bit(self.optimizer_name, grad, p, state['state1'], config['betas'][0], config['eps'], step, config['lr'],
state['state2'], config['betas'][1], config['weight_decay'], gnorm_scale,
+ state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max_unorm=config['max_unorm'], skip_zeros=config['skip_zeros'])
- state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max_unorm=config['max_unorm'])
elif state['state1'].dtype == torch.uint8 and not config['block_wise']:
F.optimizer_update_8bit(self.optimizer_name, grad, p, state['state1'], state['state2'], config['betas'][0], config['betas'][1],
config['eps'], step, config['lr'],
state['qmap1'], state['qmap2'], state['max1'], state['max2'], state['new_max1'], state['new_max2'],
config['weight_decay'], gnorm_scale=gnorm_scale,
unorm_vec=state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max</s>
===========changed ref 1===========
# module: bitsandbytes.optim.optimizer
class Optimizer2State(Optimizer8bit):
@torch.no_grad()
def update_step(self, group, p, gindex, pindex):
# offset: 1
<s>
unorm_vec=state['unorm_vec'] if config['max_unorm'] > 0.0 else None, max_unorm=config['max_unorm'])
# swap maxes
state['max1'], state['new_max1'] = state['new_max1'], state['max1']
state['max2'], state['new_max2'] = state['new_max2'], state['max2']
elif state['state1'].dtype == torch.uint8 and config['block_wise']:
F.optimizer_update_8bit_blockwise(self.optimizer_name, grad, p, state['state1'], state['state2'], config['betas'][0], config['betas'][1],
config['eps'], step, config['lr'],
state['qmap1'], state['qmap2'], state['absmax1'], state['absmax2'],
+ config['weight_decay'], gnorm_scale=gnorm_scale, skip_zeros=config['skip_zeros'])
- config['weight_decay'], gnorm_scale=gnorm_scale)
|
bitsandbytes.nn.modules/StableEmbedding.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
67a1283501fa24d346f8e8efb4fc888a9ed8d193
|
[FIX] passing of sparse in StableEmbedding
|
<0>:<add> super(StableEmbedding, self).__init__(num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, _weight)
<del> super(StableEmbedding, self).__init__(num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, False, _weight)
|
<s>modules
class StableEmbedding(torch.nn.Embedding):
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
+ sparse: bool = False, _weight: Optional[Tensor] = None) -> None:
- sparse: bool = True, _weight: Optional[Tensor] = None) -> None:
<0> super(StableEmbedding, self).__init__(num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, False, _weight)
<1> self.norm = torch.nn.LayerNorm(embedding_dim)
<2> GlobalOptimManager.get_instance().register_parameters(self.weight)
<3> GlobalOptimManager.get_instance().override_config(self.weight, 'optim_bits', 32)
<4>
| |
bitsandbytes.optim.optimizer/Optimizer2State.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3cff6795fb70dd99b4802593f3c70d291e0cd1dc
|
Merge branch 'main' of github.com:facebookresearch/bitsandbytes into 0.26.0
|
<5>:<add> # format: '(beta1, beta2)'
<add> betas = betas.replace('(', '').replace(')', '').strip().split(',')
<add> betas = [float(b) for b in betas]
<del> betas = eval(betas)
<6>:<del> print(betas, 'parsed')
|
# module: bitsandbytes.optim.optimizer
class Optimizer2State(Optimizer8bit):
def __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0.0, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0,
skip_zeros=False):
<0> if not 0.0 <= lr:
<1> raise ValueError("Invalid learning rate: {}".format(lr))
<2> if not 0.0 <= eps:
<3> raise ValueError("Invalid epsilon value: {}".format(eps))
<4> if isinstance(betas, str):
<5> betas = eval(betas)
<6> print(betas, 'parsed')
<7> for i in range(len(betas)):
<8> if not 0.0 <= betas[i] < 1.0:
<9> raise ValueError(f"Invalid beta parameter at index {i}: {betas[i]}")
<10> if not 0.0 <= weight_decay:
<11> raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
<12> defaults = dict(lr=lr, betas=betas, eps=eps,
<13> weight_decay=weight_decay)
<14> super(Optimizer2State, self).__init__(params, defaults, optim_bits)
<15>
<16> if args is None:
<17> args = {}
<18> args['optim_bits'] = optim_bits
<19> args['percentile_clipping'] = 100
<20> args['min_8bit_size'] = min_8bit_size
<21> args['percentile_clipping'] = percentile_clipping
<22> args['block_wise'] = block_wise
<23> args['max_unorm'] = max_unorm
<24> args['skip_zeros'] = skip_zeros
<25>
<26> self.args = MockArgs(args)
<27> else:
<28> self.args = args
<29>
<30> self.optimizer_name = optimizer_</s>
|
===========below chunk 0===========
<s> bitsandbytes.optim.optimizer
class Optimizer2State(Optimizer8bit):
def __init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0.0, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0,
skip_zeros=False):
# offset: 1
===========unchanged ref 0===========
at: bitsandbytes.optim.optimizer
MockArgs(initial_data)
Optimizer2State(optimizer_name, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
at: bitsandbytes.optim.optimizer.Optimizer8bit
__init__(params, defaults, optim_bits=32)
__init__(self, params, defaults, optim_bits=32)
===========changed ref 0===========
# module: bitsandbytes.functional
lib = ct.cdll.LoadLibrary(os.path.dirname(__file__) + '/libbitsandbytes.so')
name2qmap = {}
''' C FUNCTIONS FOR OPTIMIZERS '''
str2optimizer32bit = {}
str2optimizer32bit['adam'] = (lib.cadam32bit_g32, lib.cadam32bit_g16)
str2optimizer32bit['momentum'] = (lib.cmomentum32bit_g32, lib.cmomentum32bit_g16)
str2optimizer32bit['rmsprop'] = (lib.crmsprop32bit_g32, lib.crmsprop32bit_g16)
+ str2optimizer32bit['adagrad'] = (lib.cadagrad32bit_g32, lib.cadagrad32bit_g16)
str2optimizer32bit['lars'] = (lib.cmomentum32bit_g32, lib.cmomentum32bit_g16)
str2optimizer32bit['lamb'] = (lib.cadam32bit_g32, lib.cadam32bit_g16)
str2optimizer8bit = {}
str2optimizer8bit['adam'] = (lib.cadam_static_8bit_g32, lib.cadam_static_8bit_g16)
str2optimizer8bit['momentum'] = (lib.cmomentum_static_8bit_g32, lib.cmomentum_static_8bit_g16)
str2optimizer8bit['rmsprop'] = (lib.crmsprop_static_8bit_g32, lib.crmsprop_static_8bit_g16)
str2optimizer8bit['lamb'] = (lib.cadam_static_8bit_g32, lib.cadam_static_8bit_g16)
str2optimizer8bit['lars'] = (lib.cmomentum_static_8bit_g32, lib.cmomentum_static_8bit_g16)
str2optimizer8bit_blockwise = {}
str2optimizer8bit_blockwise['adam'] = (</s>
===========changed ref 1===========
# module: bitsandbytes.functional
# offset: 1
<s>g16)
str2optimizer8bit_blockwise = {}
str2optimizer8bit_blockwise['adam'] = (lib.cadam_8bit_blockwise_fp32, lib.cadam_8bit_blockwise_fp16)
str2optimizer8bit_blockwise['momentum'] = (lib.cmomentum_8bit_blockwise_fp32, lib.cmomentum_8bit_blockwise_fp16)
str2optimizer8bit_blockwise['rmsprop'] = (lib.crmsprop_8bit_blockwise_fp32, lib.crmsprop_8bit_blockwise_fp16)
+ str2optimizer8bit_blockwise['adagrad'] = (lib.cadagrad_8bit_blockwise_fp32, lib.cadagrad_8bit_blockwise_fp16)
optimal_normal = [-0.9939730167388916, -0.8727636337280273, -0.8097418546676636, -0.7660024166107178, -0.7318882346153259, -0.6793879270553589, -0.657649040222168, -0.6385974884033203, -0.6211113333702087, -0.5901028513908386, -0.5762918591499329, -0.5630806684494019, -0.5509274005889893, -0.5394591689109802, -0.5283197164535522, -0.517780065536499, -0.5074946284294128, -0.4980469048023224, -0.48867011070251465, -0.48003149032592773, -0.47125306</s>
===========changed ref 2===========
# module: bitsandbytes.functional
# offset: 2
<s>02014, -0.4629971981048584, -0.4547359049320221, -0.446626216173172, -0.43902668356895447, -0.43158355355262756, -0.4244747757911682, -0.4173796474933624, -0.41038978099823, -0.4055633544921875, -0.4035947024822235, -0.39701032638549805, -0.39057496190071106, -0.38439232110977173, -0.3782760500907898, -0.3721940815448761, -0.3661896586418152, -0.3604033589363098, -0.354605108499527, -0.34892538189888, -0.34320303797721863, -0.3376772701740265, -0.3323028087615967, -0.3269782066345215, -0.32166096568107605, -0.316457599401474, -0.3112771809101105, -0.3061025142669678, -0.30106794834136963, -0.2961243987083435, -0.2912728488445282, -0.28644347190856934, -0.28165507316589355, -0.2769731283187866, -0.2722635865211487, -0.26779335737228394, -0.26314786076545715, -0.25</s>
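A quick worked example of the string-betas parsing introduced in this commit (plain Python, no library imports): the expected input format is '(beta1, beta2)', and replacing eval() with explicit parsing avoids executing arbitrary strings coming from config files or the command line.

    betas = "(0.9, 0.995)"
    betas = betas.replace('(', '').replace(')', '').strip().split(',')
    betas = [float(b) for b in betas]   # float() tolerates the leftover space after the comma
    assert betas == [0.9, 0.995]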
|
tests.test_optim/test_optimizer32bit
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
3cff6795fb70dd99b4802593f3c70d291e0cd1dc
|
Merge branch 'main' of github.com:facebookresearch/bitsandbytes into 0.26.0
|
<10>:<add> atol, rtol = 2e-6, 1e-5
<del> atol, rtol = 1e-6, 1e-5
|
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer32bit(dim1, dim2, gtype, optim_name):
<0> if dim1 == 1 and dim2 == 1: return
<1> p1 = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.1
<2> p2 = p1.clone()
<3> p1 = p1.float()
<4>
<5>
<6> torch_optimizer = str2optimizers[optim_name][0]([p1])
<7> bnb_optimizer = str2optimizers[optim_name][1]([p2])
<8>
<9> if gtype == torch.float32:
<10> atol, rtol = 1e-6, 1e-5
<11> else:
<12> atol, rtol = 1e-4, 1e-3
<13>
<14>
<15> for i in range(50):
<16> g = torch.randn(dim1,dim2, device='cuda', dtype=gtype)*0.01
<17> p1.grad = g.clone().float()
<18> p2.grad = g.clone()
<19>
<20> bnb_optimizer.step()
<21> torch_optimizer.step()
<22>
<23> for name1, name2 in str2statenames[optim_name]:
<24> torch.testing.assert_allclose(torch_optimizer.state[p1][name1], bnb_optimizer.state[p2][name2], atol=atol, rtol=rtol)
<25>
<26> torch.testing.assert_allclose(p1, p2.float(), atol=atol, rtol=rtol)
<27>
<28> if i % 10 == 0 and i > 0:
<29> path = get_temp_dir()
<30> torch.save(bnb_optimizer.state_dict(),join(path, 'opt.pt'))
<31> del bnb_optimizer
<32> bnb_optimizer = None
<33> bnb_optimizer = str2optimizers[optim_name][1]</s>
|
===========below chunk 0===========
# module: tests.test_optim
@pytest.mark.parametrize("dim1, dim2, gtype, optim_name", values, ids=names)
def test_optimizer32bit(dim1, dim2, gtype, optim_name):
# offset: 1
bnb_optimizer.load_state_dict(torch.load(join(path, 'opt.pt')))
rm_path(path)
torch.testing.assert_allclose(p1, p2.float(), atol=atol, rtol=rtol)
for name1, name2 in str2statenames[optim_name]:
torch.testing.assert_allclose(torch_optimizer.state[p1][name1], bnb_optimizer.state[p2][name2], atol=atol, rtol=rtol)
if gtype == torch.float16:
# the adam buffers should also be close because they are 32-bit
            # but the parameters can diverge because they are 16-bit
            # the difference grows larger and larger with each update
# --> copy the state to keep weights close
p1.data = p1.data.half().float()
p2.copy_(p1.data)
torch.testing.assert_allclose(p1.half(), p2)
if optim_name in ['lars', 'lamb']:
assert bnb_optimizer.state[p2]['unorm_vec'] > 0.0
===========changed ref 0===========
+ # module: bitsandbytes.optim.adamw
+
+
===========changed ref 1===========
+ # module: bitsandbytes.optim.adagrad
+
+
===========changed ref 2===========
+ # module: bitsandbytes.optim.adagrad
+ torch.optim.Adagrad
+
===========changed ref 3===========
+ # module: bitsandbytes.optim.adamw
+ class AdamW32bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
+ weight_decay=1e-2, amsgrad=False, args=None,
+ min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super(AdamW32bit, self).__init__('adam', params, lr, betas, eps,
+ weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise)
+
===========changed ref 4===========
+ # module: bitsandbytes.optim.adamw
+ class AdamW8bit(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
+ weight_decay=1e-2, amsgrad=False, args=None,
+ min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super(AdamW8bit, self).__init__('adam', params, lr, betas, eps,
+ weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise)
+
===========changed ref 5===========
+ # module: bitsandbytes.optim.adamw
+ class AdamW(Optimizer2State):
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
+ weight_decay=1e-2, amsgrad=False, optim_bits=32, args=None,
+ min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ super(AdamW, self).__init__('adam', params, lr, betas, eps,
+ weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise)
+
===========changed ref 6===========
+ # module: bitsandbytes.optim.adagrad
+ class Adagrad32bit(Optimizer1State):
+ def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10,
+ optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ if not 0.0 <= lr:
+ raise ValueError("Invalid learning rate: {}".format(lr))
+ if not 0.0 <= weight_decay:
+ raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+ if not 0.0 <= eps:
+ raise ValueError("Invalid epsilon value: {}".format(eps))
+ if initial_accumulator_value != 0.0:
+ raise ValueError('Initial accumulator value != 0.0 not supported!')
+ if lr_decay != 0.0:
+ raise ValueError('Lr Decay != 0.0 not supported!')
+ super(Adagrad32bit, self).__init__('adagrad', params, lr, (0.0, 0.0), eps,
+ weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise)
+
===========changed ref 7===========
+ # module: bitsandbytes.optim.adagrad
+ class Adagrad(Optimizer1State):
+ def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10,
+ optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ if not 0.0 <= lr:
+ raise ValueError("Invalid learning rate: {}".format(lr))
+ if not 0.0 <= weight_decay:
+ raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+ if not 0.0 <= eps:
+ raise ValueError("Invalid epsilon value: {}".format(eps))
+ if initial_accumulator_value != 0.0:
+ raise ValueError('Initial accumulator value != 0.0 not supported!')
+ if lr_decay != 0.0:
+ raise ValueError('Lr Decay != 0.0 not supported!')
+ super(Adagrad, self).__init__('adagrad', params, lr, (0.0, 0.0), eps,
+ weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise)
+
===========changed ref 8===========
+ # module: bitsandbytes.optim.adagrad
+ class Adagrad8bit(Optimizer1State):
+ def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10,
+ optim_bits=8, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True):
+ if not 0.0 <= lr:
+ raise ValueError("Invalid learning rate: {}".format(lr))
+ if not 0.0 <= weight_decay:
+ raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+ if not 0.0 <= eps:
+ raise ValueError("Invalid epsilon value: {}".format(eps))
+ if initial_accumulator_value != 0.0:
+ raise ValueError('Initial accumulator value != 0.0 not supported!')
+ if lr_decay != 0.0:
+ raise ValueError('Lr Decay != 0.0 not supported!')
+ assert block_wise
+ super(Adagrad8bit, self).__init__('adagrad', params, lr, (0.0, 0.0), eps,
+ weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise)
+
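A hedged usage sketch of the Adagrad variants added alongside this test (assumes a CUDA device and that the class is re-exported as bnb.optim.Adagrad8bit, which this diff does not show): the 8-bit variant asserts block_wise and only supports the default lr_decay and initial_accumulator_value of zero.

    import torch
    import bitsandbytes as bnb

    p = torch.nn.Parameter(torch.randn(64, 64, device='cuda'))
    opt = bnb.optim.Adagrad8bit([p], lr=1e-2)   # blockwise 8-bit optimizer state
    p.grad = torch.randn_like(p)
    opt.step()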
|
bitsandbytes.optim.optimizer/GlobalOptimManager.initialize
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
20e1677dfdc4495038fd780807c8cbc253adf921
|
Added module override, bnb.nn.Embedding #13 #15 #19
|
<4>:<add> self.module_weight_config_triple = []
|
# module: bitsandbytes.optim.optimizer
class GlobalOptimManager(object):
def initialize(self):
<0> self.pid2config = {}
<1> self.index2config = {}
<2> self.optimizer = None
<3> self.uses_config_override = False
<4>
| |
bitsandbytes.optim.optimizer/Optimizer8bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
20e1677dfdc4495038fd780807c8cbc253adf921
|
Added module override, bnb.nn.Embedding #13 #15 #19
|
<1>:<add> self.initialized = False
<del> self.checked_if_on_gpu = False
|
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def __init__(self, params, defaults, optim_bits=32):
<0> super(Optimizer8bit, self).__init__(params, defaults)
<1> self.checked_if_on_gpu = False
<2> self.name2qmap = {}
<3>
<4> self.mng = GlobalOptimManager.get_instance()
<5> self.non_castable_tensor_keys = set(
<6> ['qmap1', 'qmap2',
<7> 'max1', 'max2',
<8> 'new_max1', 'new_max2',
<9> 'state1', 'state2',
<10> 'gnorm_vec', 'absmax1', 'absmax2',
<11> 'unorm_vec'])
<12>
<13> if optim_bits == 8: self.fill_qmap()
<14>
|
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class GlobalOptimManager(object):
+ def register_module_override(self, module, param_name, config):
+ self.module_weight_config_triple.append((module, param_name, config))
+
===========changed ref 1===========
# module: bitsandbytes.optim.optimizer
class GlobalOptimManager(object):
def initialize(self):
self.pid2config = {}
self.index2config = {}
self.optimizer = None
self.uses_config_override = False
+ self.module_weight_config_triple = []
|
bitsandbytes.optim.optimizer/Optimizer8bit.to_gpu
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
20e1677dfdc4495038fd780807c8cbc253adf921
|
Added module override, bnb.nn.Embedding #13 #15 #19
|
<0>:<del> self.checked_if_on_gpu = True
|
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def to_gpu(self):
<0> self.checked_if_on_gpu = True
<1> for gindex, group in enumerate(self.param_groups):
<2> for pindex, p in enumerate(group['params']):
<3> if p in self.state:
<4> values = self.state[p]
<5> for k, v in values.items():
<6> if isinstance(v, torch.Tensor):
<7> self.state[p][k] = v.to(p.device)
<8>
|
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class GlobalOptimManager(object):
+ def register_module_override(self, module, param_name, config):
+ self.module_weight_config_triple.append((module, param_name, config))
+
===========changed ref 1===========
# module: bitsandbytes.optim.optimizer
class GlobalOptimManager(object):
def initialize(self):
self.pid2config = {}
self.index2config = {}
self.optimizer = None
self.uses_config_override = False
+ self.module_weight_config_triple = []
===========changed ref 2===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def __init__(self, params, defaults, optim_bits=32):
super(Optimizer8bit, self).__init__(params, defaults)
+ self.initialized = False
- self.checked_if_on_gpu = False
self.name2qmap = {}
self.mng = GlobalOptimManager.get_instance()
self.non_castable_tensor_keys = set(
['qmap1', 'qmap2',
'max1', 'max2',
'new_max1', 'new_max2',
'state1', 'state2',
'gnorm_vec', 'absmax1', 'absmax2',
'unorm_vec'])
if optim_bits == 8: self.fill_qmap()
|
bitsandbytes.optim.optimizer/Optimizer8bit.step
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
20e1677dfdc4495038fd780807c8cbc253adf921
|
Added module override, bnb.nn.Embedding #13 #15 #19
|
<13>:<add> if not self.initialized:
<add> self.check_overrides()
<add> self.to_gpu() # needed for fairseq pure fp16 training
<del> if not self.checked_if_on_gpu: self.to_gpu() # needed for fairseq pure fp16 training
<14>:<add> self.initialized = True
<add>
|
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
@torch.no_grad()
def step(self, closure=None):
<0> """Performs a single optimization step.
<1>
<2> Arguments:
<3> closure (callable, optional): A closure that reevaluates the model
<4> and returns the loss.
<5> """
<6> loss = None
<7> if closure is not None:
<8> with torch.enable_grad():
<9> loss = closure()
<10>
<11> overflows = []
<12>
<13> if not self.checked_if_on_gpu: self.to_gpu() # needed for fairseq pure fp16 training
<14> for gindex, group in enumerate(self.param_groups):
<15> for pindex, p in enumerate(group['params']):
<16> if p.grad is None:
<17> continue
<18> state = self.state[p]
<19> if len(state) == 0:
<20> self.init_state(group, p, gindex, pindex)
<21>
<22> self.update_step(group, p, gindex, pindex)
<23>
<24> return loss
<25>
|
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def to_gpu(self):
- self.checked_if_on_gpu = True
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group['params']):
if p in self.state:
values = self.state[p]
for k, v in values.items():
if isinstance(v, torch.Tensor):
self.state[p][k] = v.to(p.device)
===========changed ref 1===========
# module: bitsandbytes.optim.optimizer
class GlobalOptimManager(object):
+ def register_module_override(self, module, param_name, config):
+ self.module_weight_config_triple.append((module, param_name, config))
+
===========changed ref 2===========
# module: bitsandbytes.optim.optimizer
class GlobalOptimManager(object):
def initialize(self):
self.pid2config = {}
self.index2config = {}
self.optimizer = None
self.uses_config_override = False
+ self.module_weight_config_triple = []
===========changed ref 3===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def __init__(self, params, defaults, optim_bits=32):
super(Optimizer8bit, self).__init__(params, defaults)
+ self.initialized = False
- self.checked_if_on_gpu = False
self.name2qmap = {}
self.mng = GlobalOptimManager.get_instance()
self.non_castable_tensor_keys = set(
['qmap1', 'qmap2',
'max1', 'max2',
'new_max1', 'new_max2',
'state1', 'state2',
'gnorm_vec', 'absmax1', 'absmax2',
'unorm_vec'])
if optim_bits == 8: self.fill_qmap()
|
bitsandbytes.nn.modules/StableEmbedding.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
20e1677dfdc4495038fd780807c8cbc253adf921
|
Added module override, bnb.nn.Embedding #13 #15 #19
|
<2>:<del> GlobalOptimManager.get_instance().register_parameters(self.weight)
<3>:<add> GlobalOptimManager.get_instance().register_module_override(self, 'weight', {'optim_bits': 32})
<del> GlobalOptimManager.get_instance().override_config(self.weight, 'optim_bits', 32)
|
# module: bitsandbytes.nn.modules
class StableEmbedding(torch.nn.Embedding):
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
sparse: bool = False, _weight: Optional[Tensor] = None) -> None:
<0> super(StableEmbedding, self).__init__(num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, _weight)
<1> self.norm = torch.nn.LayerNorm(embedding_dim)
<2> GlobalOptimManager.get_instance().register_parameters(self.weight)
<3> GlobalOptimManager.get_instance().override_config(self.weight, 'optim_bits', 32)
<4>
|
===========unchanged ref 0===========
at: torch.nn.modules.sparse.Embedding
__constants__ = ['num_embeddings', 'embedding_dim', 'padding_idx', 'max_norm',
'norm_type', 'scale_grad_by_freq', 'sparse']
num_embeddings: int
embedding_dim: int
padding_idx: Optional[int]
max_norm: Optional[float]
norm_type: float
scale_grad_by_freq: bool
weight: Tensor
freeze: bool
sparse: bool
__init__(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None, max_norm: Optional[float]=None, norm_type: float=2., scale_grad_by_freq: bool=False, sparse: bool=False, _weight: Optional[Tensor]=None, _freeze: bool=False, device=None, dtype=None) -> None
__init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None, max_norm: Optional[float]=None, norm_type: float=2., scale_grad_by_freq: bool=False, sparse: bool=False, _weight: Optional[Tensor]=None, _freeze: bool=False, device=None, dtype=None) -> None
===========changed ref 0===========
# module: bitsandbytes.optim.optimizer
class GlobalOptimManager(object):
+ def register_module_override(self, module, param_name, config):
+ self.module_weight_config_triple.append((module, param_name, config))
+
===========changed ref 1===========
# module: bitsandbytes.optim.optimizer
class GlobalOptimManager(object):
def initialize(self):
self.pid2config = {}
self.index2config = {}
self.optimizer = None
self.uses_config_override = False
+ self.module_weight_config_triple = []
===========changed ref 2===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def to_gpu(self):
- self.checked_if_on_gpu = True
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group['params']):
if p in self.state:
values = self.state[p]
for k, v in values.items():
if isinstance(v, torch.Tensor):
self.state[p][k] = v.to(p.device)
===========changed ref 3===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
def __init__(self, params, defaults, optim_bits=32):
super(Optimizer8bit, self).__init__(params, defaults)
+ self.initialized = False
- self.checked_if_on_gpu = False
self.name2qmap = {}
self.mng = GlobalOptimManager.get_instance()
self.non_castable_tensor_keys = set(
['qmap1', 'qmap2',
'max1', 'max2',
'new_max1', 'new_max2',
'state1', 'state2',
'gnorm_vec', 'absmax1', 'absmax2',
'unorm_vec'])
if optim_bits == 8: self.fill_qmap()
===========changed ref 4===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
+ def check_overrides(self):
+ for module, attr, config in self.mng.module_weight_config_triple:
+ pmodule = getattr(module, attr)
+ assert pmodule is not None
+ assert isinstance(pmodule, torch.Tensor) or isinstance(pmodule, torch.Parameter)
+ found = False
+ for gindex, group in enumerate(self.param_groups):
+ if found: break
+ for pindex, p in enumerate(group['params']):
+ if found: break
+ if id(p) == id(pmodule):
+ # found the matching parameter
+ # init override
+ self.mng.pid2config[id(p)] = config
+ self.mng.index2config[(gindex, pindex)] = self.mng.pid2config[id(p)]
+ found = True
+
===========changed ref 5===========
# module: bitsandbytes.optim.optimizer
class Optimizer8bit(torch.optim.Optimizer):
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
overflows = []
+ if not self.initialized:
+ self.check_overrides()
+ self.to_gpu() # needed for fairseq pure fp16 training
- if not self.checked_if_on_gpu: self.to_gpu() # needed for fairseq pure fp16 training
+ self.initialized = True
+
for gindex, group in enumerate(self.param_groups):
for pindex, p in enumerate(group['params']):
if p.grad is None:
continue
state = self.state[p]
if len(state) == 0:
self.init_state(group, p, gindex, pindex)
self.update_step(group, p, gindex, pindex)
return loss
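A hedged end-to-end sketch of the module-override path added in this commit (assumes a CUDA device and the usual bnb.nn.StableEmbedding / bnb.optim.Adam8bit exports): the embedding registers {'optim_bits': 32} for its weight, and check_overrides() resolves it by parameter identity on the first optimizer step, so the embedding keeps 32-bit optimizer state while other parameters could use 8-bit state.

    import torch
    import bitsandbytes as bnb

    emb = bnb.nn.StableEmbedding(1000, 64).cuda()
    opt = bnb.optim.Adam8bit(emb.parameters(), lr=1e-3)

    loss = emb(torch.randint(0, 1000, (8,), device='cuda')).sum()
    loss.backward()
    opt.step()   # first step runs check_overrides() and applies the 32-bit override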
|
bitsandbytes.optim.lars/LARS.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
33efe4a09f459832e8beceba70add0695cc485e4
|
Remove unused imports, fix NotImplementedError
|
<1>:<add> raise NotImplementedError(f'LARS without momentum is not supported!')
<del> raise NotImplementError(f'LARS without momentum is not supported!')
|
# module: bitsandbytes.optim.lars
class LARS(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
<0> if momentum == 0:
<1> raise NotImplementError(f'LARS without momentum is not supported!')
<2> super(LARS, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
<3> weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
<4>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.lars
LARS(params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02)
at: bitsandbytes.optim.optimizer.Optimizer1State
__init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
__init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
===========changed ref 0===========
# module: bitsandbytes.optim.adagrad
-
-
===========changed ref 1===========
# module: bitsandbytes.optim.adagrad
- torch.optim.Adagrad
-
|
bitsandbytes.optim.lars/LARS8bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
33efe4a09f459832e8beceba70add0695cc485e4
|
Remove unused imports, fix NotImplementedError
|
<1>:<add> raise NotImplementedError(f'LARS without momentum is not supported!')
<del> raise NotImplementError(f'LARS without momentum is not supported!')
|
# module: bitsandbytes.optim.lars
class LARS8bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
<0> if momentum == 0:
<1> raise NotImplementError(f'LARS without momentum is not supported!')
<2> super(LARS8bit, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
<3> weight_decay, 8, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
<4>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.lars
LARS8bit(params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02)
at: bitsandbytes.optim.optimizer.Optimizer1State
__init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
__init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
===========changed ref 0===========
# module: bitsandbytes.optim.lars
class LARS(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
===========changed ref 1===========
# module: bitsandbytes.optim.adagrad
-
-
===========changed ref 2===========
# module: bitsandbytes.optim.adagrad
- torch.optim.Adagrad
-
|
bitsandbytes.optim.lars/LARS32bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
33efe4a09f459832e8beceba70add0695cc485e4
|
Remove unused imports, fix NotImplementedError
|
<1>:<add> raise NotImplementedError(f'LARS without momentum is not supported!')
<del> raise NotImplementError(f'LARS without momentum is not supported!')
|
# module: bitsandbytes.optim.lars
class LARS32bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
<0> if momentum == 0:
<1> raise NotImplementError(f'LARS without momentum is not supported!')
<2> super(LARS32bit, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
<3> weight_decay, 32, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
<4>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.lars
LARS32bit(params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02)
at: bitsandbytes.optim.optimizer.Optimizer1State
__init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
__init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
===========changed ref 0===========
# module: bitsandbytes.optim.lars
class LARS8bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS8bit, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, 8, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
===========changed ref 1===========
# module: bitsandbytes.optim.lars
class LARS(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
===========changed ref 2===========
# module: bitsandbytes.optim.adagrad
-
-
===========changed ref 3===========
# module: bitsandbytes.optim.adagrad
- torch.optim.Adagrad
-
|
bitsandbytes.optim.sgd/SGD.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
33efe4a09f459832e8beceba70add0695cc485e4
|
Remove unused imports, fix NotImplementedError
|
<1>:<add> raise NotImplementedError(f'SGD without momentum is not supported!')
<del> raise NotImplementError(f'SGD without momentum is not supported!')
|
# module: bitsandbytes.optim.sgd
class SGD(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, block_wise=True):
<0> if momentum == 0:
<1> raise NotImplementError(f'SGD without momentum is not supported!')
<2> super(SGD, self).__init__('momentum', params, lr, (momentum, dampening), 0.0,
<3> weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise)
<4>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.optimizer.Optimizer1State
__init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
__init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
at: bitsandbytes.optim.sgd
SGD(params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True)
===========changed ref 0===========
# module: bitsandbytes.optim.adagrad
-
-
===========changed ref 1===========
# module: bitsandbytes.optim.adagrad
- torch.optim.Adagrad
-
===========changed ref 2===========
# module: bitsandbytes.optim.lars
class LARS32bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS32bit, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, 32, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
===========changed ref 3===========
# module: bitsandbytes.optim.lars
class LARS8bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS8bit, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, 8, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
===========changed ref 4===========
# module: bitsandbytes.optim.lars
class LARS(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
|
bitsandbytes.optim.sgd/SGD8bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
33efe4a09f459832e8beceba70add0695cc485e4
|
Remove unused imports, fix NotImplementedError
|
<1>:<add> raise NotImplementedError(f'SGD without momentum is not supported!')
<del> raise NotImplementError(f'SGD without momentum is not supported!')
|
# module: bitsandbytes.optim.sgd
class SGD8bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, block_wise=True):
<0> if momentum == 0:
<1> raise NotImplementError(f'SGD without momentum is not supported!')
<2> super(SGD8bit, self).__init__('momentum', params, lr, (momentum, dampening), 0.0,
<3> weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise)
<4>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.optimizer.Optimizer1State
__init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
__init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
at: bitsandbytes.optim.sgd
SGD8bit(params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True)
===========changed ref 0===========
# module: bitsandbytes.optim.sgd
class SGD(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, block_wise=True):
if momentum == 0:
+ raise NotImplementedError(f'SGD without momentum is not supported!')
- raise NotImplementError(f'SGD without momentum is not supported!')
super(SGD, self).__init__('momentum', params, lr, (momentum, dampening), 0.0,
weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise)
===========changed ref 1===========
# module: bitsandbytes.optim.adagrad
-
-
===========changed ref 2===========
# module: bitsandbytes.optim.adagrad
- torch.optim.Adagrad
-
===========changed ref 3===========
# module: bitsandbytes.optim.lars
class LARS32bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS32bit, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, 32, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
===========changed ref 4===========
# module: bitsandbytes.optim.lars
class LARS8bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS8bit, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, 8, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
===========changed ref 5===========
# module: bitsandbytes.optim.lars
class LARS(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
|
bitsandbytes.optim.sgd/SGD32bit.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
33efe4a09f459832e8beceba70add0695cc485e4
|
Remove unused imports, fix NotImplementedError
|
<1>:<add> raise NotImplementedError(f'SGD without momentum is not supported!')
<del> raise NotImplementError(f'SGD without momentum is not supported!')
|
# module: bitsandbytes.optim.sgd
class SGD32bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, block_wise=True):
<0> if momentum == 0:
<1> raise NotImplementError(f'SGD without momentum is not supported!')
<2> super(SGD32bit, self).__init__('momentum', params, lr, (momentum, dampening), 0.0,
<3> weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise)
<4>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.optimizer.Optimizer1State
__init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
__init__(self, optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
at: bitsandbytes.optim.sgd
SGD32bit(params, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True)
===========changed ref 0===========
# module: bitsandbytes.optim.sgd
class SGD8bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, block_wise=True):
if momentum == 0:
+ raise NotImplementedError(f'SGD without momentum is not supported!')
- raise NotImplementError(f'SGD without momentum is not supported!')
super(SGD8bit, self).__init__('momentum', params, lr, (momentum, dampening), 0.0,
weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise)
===========changed ref 1===========
# module: bitsandbytes.optim.sgd
class SGD(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, block_wise=True):
if momentum == 0:
+ raise NotImplementedError(f'SGD without momentum is not supported!')
- raise NotImplementError(f'SGD without momentum is not supported!')
super(SGD, self).__init__('momentum', params, lr, (momentum, dampening), 0.0,
weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise)
===========changed ref 2===========
# module: bitsandbytes.optim.adagrad
-
-
===========changed ref 3===========
# module: bitsandbytes.optim.adagrad
- torch.optim.Adagrad
-
===========changed ref 4===========
# module: bitsandbytes.optim.lars
class LARS32bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS32bit, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, 32, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
===========changed ref 5===========
# module: bitsandbytes.optim.lars
class LARS8bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS8bit, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, 8, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
===========changed ref 6===========
# module: bitsandbytes.optim.lars
class LARS(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
|
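Aside on the fix these rows keep repeating: `NotImplementError` is not a Python builtin, so the deleted guard lines never raised the intended exception — evaluating the misspelled name itself fails with NameError. A minimal, self-contained sketch (plain Python, no torch or bitsandbytes needed; the message text is taken from the diffs above) of the before/after behaviour:

# Before the fix: the typo'd name does not exist, so the guard surfaces as NameError.
try:
    raise NotImplementError('SGD without momentum is not supported!')
except NameError as exc:
    print(f'typo surfaces as: {exc!r}')

# After the fix: the corrected builtin raises the exception the guard actually intends.
try:
    raise NotImplementedError('SGD without momentum is not supported!')
except NotImplementedError as exc:
    print(f'intended error: {exc!r}')
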
bitsandbytes.optim.rmsprop/RMSprop.__init__
|
Modified
|
bitsandbytes-foundation~bitsandbytes
|
33efe4a09f459832e8beceba70add0695cc485e4
|
Remove unused imports, fix NotImplementedError
|
<1>:<add> raise NotImplementedError(f'RMSprop with alpha==0.0 is not supported!')
<del> raise NotImplementError(f'RMSprop with alpha==0.0 is not supported!')
<3>:<add> raise NotImplementedError(f'Centered RMSprop is not supported!')
<del> raise NotImplementError(f'Centered RMSprop is not supported!')
|
# module: bitsandbytes.optim.rmsprop
class RMSprop(Optimizer1State):
def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, block_wise=True):
<0> if alpha == 0:
<1> raise NotImplementError(f'RMSprop with alpha==0.0 is not supported!')
<2> if centered:
<3> raise NotImplementError(f'Centered RMSprop is not supported!')
<4> super(RMSprop, self).__init__('rmsprop', params, lr, (alpha, momentum), eps,
<5> weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise)
<6>
|
===========unchanged ref 0===========
at: bitsandbytes.optim.optimizer.Optimizer1State
__init__(optimizer_name, params, lr=1e-3, betas=(0.9, 0.0), eps=1e-8, weight_decay=0.0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, max_unorm=0.0, skip_zeros=False)
at: bitsandbytes.optim.rmsprop
RMSprop(params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True)
===========changed ref 0===========
# module: bitsandbytes.optim.adagrad
-
-
===========changed ref 1===========
# module: bitsandbytes.optim.adagrad
- torch.optim.Adagrad
-
===========changed ref 2===========
# module: bitsandbytes.optim.sgd
class SGD32bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, block_wise=True):
if momentum == 0:
+ raise NotImplementedError(f'SGD without momentum is not supported!')
- raise NotImplementError(f'SGD without momentum is not supported!')
super(SGD32bit, self).__init__('momentum', params, lr, (momentum, dampening), 0.0,
weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise)
===========changed ref 3===========
# module: bitsandbytes.optim.sgd
class SGD8bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, block_wise=True):
if momentum == 0:
+ raise NotImplementedError(f'SGD without momentum is not supported!')
- raise NotImplementError(f'SGD without momentum is not supported!')
super(SGD8bit, self).__init__('momentum', params, lr, (momentum, dampening), 0.0,
weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise)
===========changed ref 4===========
# module: bitsandbytes.optim.sgd
class SGD(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, block_wise=True):
if momentum == 0:
+ raise NotImplementedError(f'SGD without momentum is not supported!')
- raise NotImplementError(f'SGD without momentum is not supported!')
super(SGD, self).__init__('momentum', params, lr, (momentum, dampening), 0.0,
weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise)
===========changed ref 5===========
# module: bitsandbytes.optim.lars
class LARS32bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS32bit, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, 32, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
===========changed ref 6===========
# module: bitsandbytes.optim.lars
class LARS8bit(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS8bit, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, 8, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
===========changed ref 7===========
# module: bitsandbytes.optim.lars
class LARS(Optimizer1State):
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, optim_bits=32, args=None,
min_8bit_size=4096, percentile_clipping=100, max_unorm=0.02):
if momentum == 0:
+ raise NotImplementedError(f'LARS without momentum is not supported!')
- raise NotImplementError(f'LARS without momentum is not supported!')
super(LARS, self).__init__('lars', params, lr, (momentum, dampening), 0.0,
weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, max_unorm=max_unorm, block_wise=False)
|
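For context on how the corrected guards behave from the caller's side, a hedged usage sketch — it assumes torch and bitsandbytes are installed and that `RMSprop` is exported from `bitsandbytes.optim` as in the module above; the parameter values are illustrative only:

import torch
import bitsandbytes as bnb

params = [torch.nn.Parameter(torch.zeros(4))]

# Both unsupported configurations are now rejected up front with the intended
# NotImplementedError (the typo'd name would previously have produced a NameError).
try:
    bnb.optim.RMSprop(params, lr=1e-2, alpha=0.0)
except NotImplementedError as exc:
    print(exc)  # RMSprop with alpha==0.0 is not supported!

try:
    bnb.optim.RMSprop(params, lr=1e-2, centered=True)
except NotImplementedError as exc:
    print(exc)  # Centered RMSprop is not supported!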