path (stringlengths 9–117) | type (stringclasses, 2 values) | project (stringclasses, 10 values) | commit_hash (stringlengths 40–40) | commit_message (stringlengths 1–137) | ground_truth (stringlengths 0–2.74k) | main_code (stringlengths 102–3.37k) | context (stringlengths 0–14.7k) |
---|---|---|---|---|---|---|---|
ciphey.basemods.Decoders.octal/Octal.decode
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<1>:<del> It takes an octal string and return a string
<2>:<del> :octal_str: octal str like "110 145 154"
<3>:<add> Performs Octal decoding
<5>:<add> octal_seq = ctext.split(" ")
<del> octal_seq = text.split(" ")
<8>:<add> if len(ctext) % 3 != 0:
<del> if len(text) % 3 != 0:
<10>:<add> octal_seq = [ctext[i : i + 3] for i in range(0, len(ctext), 3)]
<del> octal_seq = [text[i : i + 3] for i in range(0, len(text), 3)]
<15>:<add> logger.trace("Octal subseq too long")
<del> logger.trace(f"Octal subseq too long")
|
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
+ def decode(self, ctext: T) -> Optional[U]:
- def decode(self, text: str) -> Optional[bytes]:
<0> """
<1> It takes an octal string and return a string
<2> :octal_str: octal str like "110 145 154"
<3> """
<4> str_converted = []
<5> octal_seq = text.split(" ")
<6> if len(octal_seq) == 1:
<7> # Concatted octal must be formed of octal triplets
<8> if len(text) % 3 != 0:
<9> return None
<10> octal_seq = [text[i : i + 3] for i in range(0, len(text), 3)]
<11> logger.trace(f"Trying chunked octal {octal_seq}")
<12> try:
<13> for octal_char in octal_seq:
<14> if len(octal_char) > 3:
<15> logger.trace(f"Octal subseq too long")
<16> return None
<17> n = int(octal_char, 8)
<18> if (
<19> n < 0
<20> ): # n cannot be greater than 255, as we checked that with the earlier length check
<21> logger.trace(f"Non octal char {octal_char}")
<22> return None
<23> str_converted.append(n)
<24>
<25> return bytes(str_converted)
<26> # Catch bad octal chars
<27> except ValueError:
<28> return None
<29>
|
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 4===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 5===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 6===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 7===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 8===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 9===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 10===========
# module: ciphey.basemods.Checkers.gtest
@registry.register
class GTestChecker(Checker[str]):
def check(self, text: T) -> Optional[str]:
+ logger.trace("Trying entropy checker")
- logger.trace(f"Trying entropy checker")
pass
===========changed ref 11===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+ self.translate = config.get_resource(self._params()["dict"], Translation)
+
===========changed ref 12===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def decode(self, ctext: T) -> Optional[U]:
+ for src, dst in self.translate.items():
+ ctext = ctext.replace(src, dst)
+ return ctext
+
===========changed ref 13===========
# module: ciphey.iface._registry
try:
+ from typing import get_args, get_origin
- from typing import get_origin, get_args
except ImportError:
from typing_inspect import get_origin, get_args
===========changed ref 14===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return {
+ "dict": ParamSpec(
+ desc="The leetspeak dictionary to use",
+ req=False,
+ default="cipheydists::translate::leet",
+ )
+ }
+
===========changed ref 15===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return {
+ "dict": ParamSpec(
+ desc="The morse code dictionary to use",
+ req=False,
+ default="cipheydists::translate::morse",
+ )
+ }
+
===========changed ref 16===========
# module: ciphey.basemods.Searchers.ausearch
@registry.register
class AuSearch(Searcher):
@lru_cache() # To save extra sorting
def get_decoders_for(self, t: type):
+ ret = registry[Decoder[t]]
- ret = [j for i in registry[Decoder][t].values() for j in i]
ret.sort(key=lambda x: x.priority(), reverse=True)
return ret
===========changed ref 17===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+ self.MORSE_CODE_DICT = config.get_resource(self._params()["dict"], Translation)
+ self.MORSE_CODE_DICT_INV = {v: k for k, v in self.MORSE_CODE_DICT.items()}
+
===========changed ref 18===========
# module: ciphey.basemods.Decoders.bases
for name, (decoder, priority) in _bases.items():
t = types.new_class(
name,
+ (Decoder[str],),
- (ciphey.iface.Decoder[str, bytes],),
exec_body=lambda x: gen_class(name, decoder, priority, x),
)
+ registry.register(t)
- ciphey.iface.registry.register(t)
===========changed ref 19===========
# module: ciphey.mathsHelper
class mathsHelper:
+
+ @staticmethod
+ def strip_punctuation(text: str) -> str:
+ """Strips punctuation from a given string.
+
+ Uses string.punctuation.
+
+ Args:
+ text -> the text to strip punctuation from.
+
+ Returns:
+ Returns string without punctuation.
+ """
+ text: str = (str(text).translate(str.maketrans("", "", punctuation))).strip(
+ "\n"
+ )
+ return text
+
===========changed ref 20===========
# module: ciphey.basemods.Crackers.affine
@registry.register
class Affine(Cracker[str]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"expected": ParamSpec(
desc="The expected distribution of the plaintext",
req=False,
config_ref=["default_dist"],
),
+ "group": ParamSpec(
- "group": ciphey.iface.ParamSpec(
desc="An ordered sequence of chars that make up the alphabet",
req=False,
default="abcdefghijklmnopqrstuvwxyz",
),
}
|
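For reference, a minimal standalone sketch of the octal-decoding logic in this record, in plain Python with no Ciphey dependencies (`decode_octal` is an illustrative name, not part of the codebase):

```python
from typing import Optional


def decode_octal(ctext: str) -> Optional[bytes]:
    """Decode space-separated or concatenated octal triplets, e.g. "110 145 154"."""
    octal_seq = ctext.split(" ")
    if len(octal_seq) == 1:
        # Concatenated octal must be formed of octal triplets
        if len(ctext) % 3 != 0:
            return None
        octal_seq = [ctext[i : i + 3] for i in range(0, len(ctext), 3)]
    decoded = []
    try:
        for octal_char in octal_seq:
            if len(octal_char) > 3:
                return None  # subsequence too long
            n = int(octal_char, 8)
            if not 0 <= n <= 255:
                return None  # not a valid byte value
            decoded.append(n)
        return bytes(decoded)
    except ValueError:  # non-octal characters
        return None


assert decode_octal("110 145 154") == b"Hel"
assert decode_octal("110145154") == b"Hel"
assert decode_octal("9999") is None
```

Note that three octal digits can encode values up to 0o777 = 511, so the sketch rejects anything above 255 explicitly; the original instead relies on `bytes()` raising `ValueError` for such values, which its `except` clause then catches.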
ciphey.basemods.Decoders.octal/Octal.getParams
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<0>:<add> return None
<del> pass
|
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
<0> pass
<1>
|
===========changed ref 0===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
+ def decode(self, ctext: T) -> Optional[U]:
- def decode(self, text: str) -> Optional[bytes]:
"""
- It takes an octal string and return a string
- :octal_str: octal str like "110 145 154"
+ Performs Octal decoding
"""
str_converted = []
+ octal_seq = ctext.split(" ")
- octal_seq = text.split(" ")
if len(octal_seq) == 1:
# Concatted octal must be formed of octal triplets
+ if len(ctext) % 3 != 0:
- if len(text) % 3 != 0:
return None
+ octal_seq = [ctext[i : i + 3] for i in range(0, len(ctext), 3)]
- octal_seq = [text[i : i + 3] for i in range(0, len(text), 3)]
logger.trace(f"Trying chunked octal {octal_seq}")
try:
for octal_char in octal_seq:
if len(octal_char) > 3:
+ logger.trace("Octal subseq too long")
- logger.trace(f"Octal subseq too long")
return None
n = int(octal_char, 8)
if (
n < 0
): # n cannot be greater than 255, as we checked that with the earlier length check
logger.trace(f"Non octal char {octal_char}")
return None
str_converted.append(n)
return bytes(str_converted)
# Catch bad octal chars
except ValueError:
return None
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 4===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 5===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 6===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 7===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 8===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 9===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 10===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 11===========
# module: ciphey.basemods.Checkers.gtest
@registry.register
class GTestChecker(Checker[str]):
def check(self, text: T) -> Optional[str]:
+ logger.trace("Trying entropy checker")
- logger.trace(f"Trying entropy checker")
pass
===========changed ref 12===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+ self.translate = config.get_resource(self._params()["dict"], Translation)
+
===========changed ref 13===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def decode(self, ctext: T) -> Optional[U]:
+ for src, dst in self.translate.items():
+ ctext = ctext.replace(src, dst)
+ return ctext
+
===========changed ref 14===========
# module: ciphey.iface._registry
try:
+ from typing import get_args, get_origin
- from typing import get_origin, get_args
except ImportError:
from typing_inspect import get_origin, get_args
===========changed ref 15===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return {
+ "dict": ParamSpec(
+ desc="The leetspeak dictionary to use",
+ req=False,
+ default="cipheydists::translate::leet",
+ )
+ }
+
===========changed ref 16===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return {
+ "dict": ParamSpec(
+ desc="The morse code dictionary to use",
+ req=False,
+ default="cipheydists::translate::morse",
+ )
+ }
+
===========changed ref 17===========
# module: ciphey.basemods.Searchers.ausearch
@registry.register
class AuSearch(Searcher):
@lru_cache() # To save extra sorting
def get_decoders_for(self, t: type):
+ ret = registry[Decoder[t]]
- ret = [j for i in registry[Decoder][t].values() for j in i]
ret.sort(key=lambda x: x.priority(), reverse=True)
return ret
===========changed ref 18===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+ self.MORSE_CODE_DICT = config.get_resource(self._params()["dict"], Translation)
+ self.MORSE_CODE_DICT_INV = {v: k for k, v in self.MORSE_CODE_DICT.items()}
+
|
ciphey.basemods.Resources.cipheydists/CipheyDists.getParams
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<0>:<add> return None
<del> pass
|
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
<0> pass
<1>
|
===========unchanged ref 0===========
at: ciphey.iface._modules
ParamSpec(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
at: ciphey.iface._modules.ConfigurableModule
getParams() -> Optional[Dict[str, ParamSpec]]
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 4===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 5===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 6===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 7===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 8===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 9===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 10===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 11===========
# module: ciphey.basemods.Checkers.gtest
@registry.register
class GTestChecker(Checker[str]):
def check(self, text: T) -> Optional[str]:
+ logger.trace("Trying entropy checker")
- logger.trace(f"Trying entropy checker")
pass
===========changed ref 12===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+ self.translate = config.get_resource(self._params()["dict"], Translation)
+
===========changed ref 13===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def decode(self, ctext: T) -> Optional[U]:
+ for src, dst in self.translate.items():
+ ctext = ctext.replace(src, dst)
+ return ctext
+
===========changed ref 14===========
# module: ciphey.iface._registry
try:
+ from typing import get_args, get_origin
- from typing import get_origin, get_args
except ImportError:
from typing_inspect import get_origin, get_args
===========changed ref 15===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return {
+ "dict": ParamSpec(
+ desc="The leetspeak dictionary to use",
+ req=False,
+ default="cipheydists::translate::leet",
+ )
+ }
+
===========changed ref 16===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return {
+ "dict": ParamSpec(
+ desc="The morse code dictionary to use",
+ req=False,
+ default="cipheydists::translate::morse",
+ )
+ }
+
===========changed ref 17===========
# module: ciphey.basemods.Searchers.ausearch
@registry.register
class AuSearch(Searcher):
@lru_cache() # To save extra sorting
def get_decoders_for(self, t: type):
+ ret = registry[Decoder[t]]
- ret = [j for i in registry[Decoder][t].values() for j in i]
ret.sort(key=lambda x: x.priority(), reverse=True)
return ret
===========changed ref 18===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+ self.MORSE_CODE_DICT = config.get_resource(self._params()["dict"], Translation)
+ self.MORSE_CODE_DICT_INV = {v: k for k, v in self.MORSE_CODE_DICT.items()}
+
===========changed ref 19===========
# module: ciphey.basemods.Decoders.bases
for name, (decoder, priority) in _bases.items():
t = types.new_class(
name,
+ (Decoder[str],),
- (ciphey.iface.Decoder[str, bytes],),
exec_body=lambda x: gen_class(name, decoder, priority, x),
)
+ registry.register(t)
- ciphey.iface.registry.register(t)
===========changed ref 20===========
# module: ciphey.mathsHelper
class mathsHelper:
+
+ @staticmethod
+ def strip_punctuation(text: str) -> str:
+ """Strips punctuation from a given string.
+
+ Uses string.punctuation.
+
+ Args:
+ text -> the text to strip punctuation from.
+
+ Returns:
+ Returns string without punctuation.
+ """
+ text: str = (str(text).translate(str.maketrans("", "", punctuation))).strip(
+ "\n"
+ )
+ return text
+
|
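The `register_multi` decorator in this record registers one `ResourceLoader` under several resource types at once. A toy registry illustrating the idea (the `Registry` class here is invented for illustration and is not Ciphey's actual implementation):

```python
from collections import defaultdict
from typing import Dict, List


class Registry:
    """Toy registry mapping a type key to the classes registered under it."""

    def __init__(self) -> None:
        self._store: Dict[type, List[type]] = defaultdict(list)

    def register_multi(self, *keys: type):
        # Register the decorated class under every given key
        def decorator(cls: type) -> type:
            for key in keys:
                self._store[key].append(cls)
            return cls

        return decorator

    def __getitem__(self, key: type) -> List[type]:
        return self._store[key]


registry = Registry()


class WordList: ...
class Distribution: ...


@registry.register_multi(WordList, Distribution)
class MyLoader: ...


assert registry[WordList] == [MyLoader]
assert registry[Distribution] == [MyLoader]
```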
ciphey.basemods.Decoders.reverse/Reverse.getParams
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<0>:<add> return None
<del> pass
|
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
<0> pass
<1>
|
===========unchanged ref 0===========
at: ciphey.iface._modules
ParamSpec(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
at: ciphey.iface._modules.ConfigurableModule
getParams() -> Optional[Dict[str, ParamSpec]]
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 2===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 3===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 5===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 6===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 7===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 8===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 9===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 10===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 11===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 12===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 13===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 14===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 15===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 16===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 17===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 18===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 19===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 20===========
# module: ciphey.basemods.Checkers.gtest
@registry.register
class GTestChecker(Checker[str]):
def check(self, text: T) -> Optional[str]:
+ logger.trace("Trying entropy checker")
- logger.trace(f"Trying entropy checker")
pass
===========changed ref 21===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+ self.translate = config.get_resource(self._params()["dict"], Translation)
+
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def decode(self, ctext: T) -> Optional[U]:
+ for src, dst in self.translate.items():
+ ctext = ctext.replace(src, dst)
+ return ctext
+
===========changed ref 23===========
# module: ciphey.iface._registry
try:
+ from typing import get_args, get_origin
- from typing import get_origin, get_args
except ImportError:
from typing_inspect import get_origin, get_args
===========changed ref 24===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return {
+ "dict": ParamSpec(
+ desc="The leetspeak dictionary to use",
+ req=False,
+ default="cipheydists::translate::leet",
+ )
+ }
+
|
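Across this commit, decoders move from the two-parameter `ciphey.iface.Decoder[str, bytes]` to a single-parameter `Decoder[str]`, as in the `Reverse` record above. A minimal sketch of what such a single-parameter generic base class can look like (illustrative only, not the real `ciphey.iface` definitions):

```python
from typing import Generic, Optional, TypeVar

T = TypeVar("T")  # the input (ciphertext) type


class Decoder(Generic[T]):
    """Base class parameterised only on the input type."""

    def decode(self, ctext: T) -> Optional[object]:
        raise NotImplementedError

    @staticmethod
    def priority() -> float:
        return 0.5


class Reverse(Decoder[str]):
    def decode(self, ctext: str) -> Optional[str]:
        return ctext[::-1]


assert Reverse().decode("ciphey") == "yehpic"
```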
ciphey.basemods.Crackers.xorcrypt/XorCrypt.getInfo
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<16>:<add> "xorcrypt::likely_lens",
<del> f"xorcrypt::likely_lens",
<29>:<add> success_likelihood=0.9, # Dunno, but it's quite likely
<del> success_likelihood=0.9, # Dunno, but it's quite likely
|
# module: ciphey.basemods.Crackers.xorcrypt
@registry.register
+ class XorCrypt(Cracker[bytes]):
- class XorCrypt(ciphey.iface.Cracker[bytes]):
def getInfo(self, ctext: bytes) -> CrackInfo:
<0> if self.keysize is not None:
<1> analysis = self.cache.get_or_update(
<2> ctext,
<3> f"xorcrypt::{self.keysize}",
<4> lambda: cipheycore.analyse_string(ctext, self.keysize, self.group),
<5> )
<6>
<7> return CrackInfo(
<8> success_likelihood=cipheycore.xorcrypt_detect(analysis, self.expected),
<9> # TODO: actually calculate runtimes
<10> success_runtime=1e-4,
<11> failure_runtime=1e-4,
<12> )
<13>
<14> keysize = self.cache.get_or_update(
<15> ctext,
<16> f"xorcrypt::likely_lens",
<17> lambda: cipheycore.xorcrypt_guess_len(ctext),
<18> )
<19>
<20> if keysize == 1:
<21> return CrackInfo(
<22> success_likelihood=0,
<23> # TODO: actually calculate runtimes
<24> success_runtime=2e-3,
<25> failure_runtime=2e-2,
<26> )
<27>
<28> return CrackInfo(
<29> success_likelihood=0.9, # Dunno, but it's quite likely
<30> # TODO: actually calculate runtimes
<31> success_runtime=2e-3,
<32> failure_runtime=2e-2,
<33> )
<34>
|
===========unchanged ref 0===========
at: ciphey.basemods.Crackers.xorcrypt.XorCrypt.__init__
self.expected = config.get_resource(self._params()["expected"])
self.cache = config.cache
self.keysize = self._params().get("keysize")
self.keysize = int(self.keysize)
at: ciphey.basemods.Crackers.xorcrypt.XorCrypt.getInfo
analysis = self.cache.get_or_update(
ctext,
f"xorcrypt::{self.keysize}",
lambda: cipheycore.analyse_string(ctext, self.keysize, self.group),
)
at: ciphey.iface._config.Cache
get_or_update(ctext: Any, keyname: str, get_value: Callable[[], Any])
at: ciphey.iface._modules
CrackInfo(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
at: ciphey.iface._modules.Targeted
getTarget() -> str
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 2===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 3===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 5===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 6===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 7===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 8===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 9===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 10===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 11===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 12===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 13===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 14===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 15===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 16===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 17===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 18===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 19===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 20===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 21===========
# module: ciphey.basemods.Checkers.gtest
@registry.register
class GTestChecker(Checker[str]):
def check(self, text: T) -> Optional[str]:
+ logger.trace("Trying entropy checker")
- logger.trace(f"Trying entropy checker")
pass
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+ self.translate = config.get_resource(self._params()["dict"], Translation)
+
===========changed ref 23===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def decode(self, ctext: T) -> Optional[U]:
+ for src, dst in self.translate.items():
+ ctext = ctext.replace(src, dst)
+ return ctext
+
|
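The `xorcrypt_guess_len` call in this record lives in the native `cipheycore` library, so its internals are not visible here. For orientation, a sketch of the classic normalised-Hamming-distance heuristic for guessing a repeating-key XOR length (an assumption for illustration, not necessarily what cipheycore does):

```python
def hamming(a: bytes, b: bytes) -> int:
    """Number of differing bits between two byte strings."""
    return sum(bin(x ^ y).count("1") for x, y in zip(a, b))


def guess_key_len(ctext: bytes, max_len: int = 16) -> int:
    """Pick the key length whose consecutive blocks are most self-similar."""
    best_len, best_score = 1, float("inf")
    for klen in range(2, min(max_len, len(ctext) // 2) + 1):
        blocks = [ctext[i : i + klen] for i in range(0, len(ctext) - klen, klen)]
        pairs = list(zip(blocks, blocks[1:]))
        if not pairs:
            continue
        # Average bit difference per byte; low values suggest the right period
        score = sum(hamming(a, b) for a, b in pairs) / (len(pairs) * klen)
        if score < best_score:
            best_len, best_score = klen, score
    return best_len


key = b"xor!"
pt = b"a fairly long sample of english text to analyse " * 4
ct = bytes(c ^ key[i % len(key)] for i, c in enumerate(pt))
print(guess_key_len(ct))  # typically 4, or a multiple of it, on English-like input
```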
ciphey.basemods.Crackers.xorcrypt/XorCrypt.attemptCrack
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<12>:<add>
<del> else:
<13>:<add> len = self.cache.get_or_update(
<del> len = self.cache.get_or_update(
<14>:<add> ctext,
<add> "xorcrypt::likely_lens",
<add> lambda: cipheycore.xorcrypt_guess_len(ctext),
<add> )
<add>
<add> logger.trace(f"Got possible length {len}")
<add>
<add> if len < 2:
<add> return []
<add>
<add> ret = []
<add> # Fuzz around
<add> for i in range(min(len - 2, 2), len + 2):
<add> ret += self.crackOne(
<15>:<add> self.cache.get_or_update(
<add> ctext,
<add> f"xorcrypt::{len}",
<del> f"xorcrypt::likely_lens",
<16>:<add> lambda: cipheycore.analyse_bytes(ctext, len),
<add> ),
<del> lambda: cipheycore.xorcrypt_guess_len(ctext),
<19>:<add> return ret
<del> logger.trace(f"Got possible length {len}")
<21>:<del> if len < 2:
<22>:<del> return []
<23>:<del>
<24>:<del> ret = []
<25>:<del> # Fuzz around
<26>:<del> for i in range(min(len - 2, 2), len + 2):
<27>:<del> ret += self.crackOne(
<28>:<del> ctext,
<29>:<del> self.cache.get_or_update(
<30>:<del> ctext,
<31>:<del> f"xorcrypt::{len}",
<32>:<del> lambda: cipheycore.analyse_bytes(ctext, len),
<33>:<del> )
<34>:<del> )
<35>:<del>
|
# module: ciphey.basemods.Crackers.xorcrypt
@registry.register
+ class XorCrypt(Cracker[bytes]):
- class XorCrypt(ciphey.iface.Cracker[bytes]):
def attemptCrack(self, ctext: bytes) -> List[CrackResult]:
<0> logger.debug(f"Trying xorcrypt cipher on {base64.b64encode(ctext)}")
<1>
<2> # Analysis must be done here, where we know the case for the cache
<3> if self.keysize is not None:
<4> return self.crackOne(
<5> ctext,
<6> self.cache.get_or_update(
<7> ctext,
<8> f"xorcrypt::{self.keysize}",
<9> lambda: cipheycore.analyse_bytes(ctext, self.keysize),
<10> ),
<11> )
<12> else:
<13> len = self.cache.get_or_update(
<14> ctext,
<15> f"xorcrypt::likely_lens",
<16> lambda: cipheycore.xorcrypt_guess_len(ctext),
<17> )
<18>
<19> logger.trace(f"Got possible length {len}")
<20>
<21> if len < 2:
<22> return []
<23>
<24> ret = []
<25> # Fuzz around
<26> for i in range(min(len - 2, 2), len + 2):
<27> ret += self.crackOne(
<28> ctext,
<29> self.cache.get_or_update(
<30> ctext,
<31> f"xorcrypt::{len}",
<32> lambda: cipheycore.analyse_bytes(ctext, len),
<33> )
<34> )
<35>
<36> return ret
<37>
|
===========unchanged ref 0===========
at: ciphey.basemods.Crackers.xorcrypt.XorCrypt
crackOne(ctext: bytes, analysis: cipheycore.windowed_analysis_res) -> List[CrackResult]
at: ciphey.basemods.Crackers.xorcrypt.XorCrypt.__init__
self.cache = config.cache
self.keysize = self._params().get("keysize")
self.keysize = int(self.keysize)
at: ciphey.iface._config.Cache
get_or_update(ctext: Any, keyname: str, get_value: Callable[[], Any])
at: ciphey.iface._modules
ParamSpec(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
at: ciphey.iface._modules.ConfigurableModule
getParams() -> Optional[Dict[str, ParamSpec]]
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
# module: ciphey.basemods.Crackers.xorcrypt
@registry.register
+ class XorCrypt(Cracker[bytes]):
- class XorCrypt(ciphey.iface.Cracker[bytes]):
def getInfo(self, ctext: bytes) -> CrackInfo:
if self.keysize is not None:
analysis = self.cache.get_or_update(
ctext,
f"xorcrypt::{self.keysize}",
lambda: cipheycore.analyse_string(ctext, self.keysize, self.group),
)
return CrackInfo(
success_likelihood=cipheycore.xorcrypt_detect(analysis, self.expected),
# TODO: actually calculate runtimes
success_runtime=1e-4,
failure_runtime=1e-4,
)
keysize = self.cache.get_or_update(
ctext,
+ "xorcrypt::likely_lens",
- f"xorcrypt::likely_lens",
lambda: cipheycore.xorcrypt_guess_len(ctext),
)
if keysize == 1:
return CrackInfo(
success_likelihood=0,
# TODO: actually calculate runtimes
success_runtime=2e-3,
failure_runtime=2e-2,
)
return CrackInfo(
+ success_likelihood=0.9, # Dunno, but it's quite likely
- success_likelihood=0.9, # Dunno, but it's quite likely
# TODO: actually calculate runtimes
success_runtime=2e-3,
failure_runtime=2e-2,
)
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 3===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 4===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 5===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 6===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 7===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 8===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 9===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 10===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 11===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 12===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 13===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 14===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 15===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 16===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 17===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 18===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 19===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
|
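Once a candidate key length is fixed, `crackOne` (backed by `cipheycore.analyse_bytes`) effectively solves each key position as an independent single-byte XOR. A self-contained sketch of that reduction, using a crude byte-class score in place of cipheycore's statistical analysis (names and scoring are illustrative assumptions):

```python
def score(plain: bytes) -> int:
    # Crude plaintext heuristic: count bytes that are lowercase letters or spaces
    return sum(c == 0x20 or 0x61 <= c <= 0x7A for c in plain)


def crack_column(col: bytes) -> int:
    """Best single-byte key for one column of a repeating-key XOR."""
    return max(range(256), key=lambda k: score(bytes(c ^ k for c in col)))


def crack_repeating_xor(ctext: bytes, key_len: int) -> bytes:
    # Bytes i, i + key_len, i + 2 * key_len, ... all share key byte i
    cols = [ctext[i::key_len] for i in range(key_len)]
    return bytes(crack_column(col) for col in cols)


pt = b"it is a truth universally acknowledged that a single man must want a wife"
key = b"key"
ct = bytes(c ^ key[i % len(key)] for i, c in enumerate(pt))
assert crack_repeating_xor(ct, len(key)) == b"key"
```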
ciphey.basemods.Crackers.xorcrypt/XorCrypt.getParams
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<1>:<add> "expected": ParamSpec(
<del> "expected": ciphey.iface.ParamSpec(
<6>:<add> "keysize": ParamSpec(
<del> "keysize": ciphey.iface.ParamSpec(
<10>:<add> "p_value": ParamSpec(
<del> "p_value": ciphey.iface.ParamSpec(
|
# module: ciphey.basemods.Crackers.xorcrypt
@registry.register
+ class XorCrypt(Cracker[bytes]):
- class XorCrypt(ciphey.iface.Cracker[bytes]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
<0> return {
<1> "expected": ciphey.iface.ParamSpec(
<2> desc="The expected distribution of the plaintext",
<3> req=False,
<4> config_ref=["default_dist"],
<5> ),
<6> "keysize": ciphey.iface.ParamSpec(
<7> desc="A key size that should be used. If not given, will attempt to work it out",
<8> req=False,
<9> ),
<10> "p_value": ciphey.iface.ParamSpec(
<11> desc="The p-value to use for windowed frequency analysis",
<12> req=False,
<13> default=0.001,
<14> ),
<15> }
<16>
|
===========unchanged ref 0===========
at: ciphey.iface._config
Config()
at: ciphey.iface._config.Config
get_resource(res_name: str, t: Optional[Type]=None) -> Any
at: ciphey.iface._config.Config.__init__
self.cache: Cache = Cache()
at: ciphey.iface._modules
ParamSpec(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
at: ciphey.iface._modules.ConfigurableModule
_params()
at: ciphey.iface._modules.Cracker
__init__(config: Config)
__init__(self, config: Config)
===========changed ref 0===========
# module: ciphey.basemods.Crackers.xorcrypt
@registry.register
+ class XorCrypt(Cracker[bytes]):
- class XorCrypt(ciphey.iface.Cracker[bytes]):
def getInfo(self, ctext: bytes) -> CrackInfo:
if self.keysize is not None:
analysis = self.cache.get_or_update(
ctext,
f"xorcrypt::{self.keysize}",
lambda: cipheycore.analyse_string(ctext, self.keysize, self.group),
)
return CrackInfo(
success_likelihood=cipheycore.xorcrypt_detect(analysis, self.expected),
# TODO: actually calculate runtimes
success_runtime=1e-4,
failure_runtime=1e-4,
)
keysize = self.cache.get_or_update(
ctext,
+ "xorcrypt::likely_lens",
- f"xorcrypt::likely_lens",
lambda: cipheycore.xorcrypt_guess_len(ctext),
)
if keysize == 1:
return CrackInfo(
success_likelihood=0,
# TODO: actually calculate runtimes
success_runtime=2e-3,
failure_runtime=2e-2,
)
return CrackInfo(
+ success_likelihood=0.9, # Dunno, but it's quite likely
- success_likelihood=0.9, # Dunno, but it's quite likely
# TODO: actually calculate runtimes
success_runtime=2e-3,
failure_runtime=2e-2,
)
===========changed ref 1===========
# module: ciphey.basemods.Crackers.xorcrypt
@registry.register
+ class XorCrypt(Cracker[bytes]):
- class XorCrypt(ciphey.iface.Cracker[bytes]):
def attemptCrack(self, ctext: bytes) -> List[CrackResult]:
logger.debug(f"Trying xorcrypt cipher on {base64.b64encode(ctext)}")
# Analysis must be done here, where we know the case for the cache
if self.keysize is not None:
return self.crackOne(
ctext,
self.cache.get_or_update(
ctext,
f"xorcrypt::{self.keysize}",
lambda: cipheycore.analyse_bytes(ctext, self.keysize),
),
)
+
- else:
+ len = self.cache.get_or_update(
- len = self.cache.get_or_update(
+ ctext,
+ "xorcrypt::likely_lens",
+ lambda: cipheycore.xorcrypt_guess_len(ctext),
+ )
+
+ logger.trace(f"Got possible length {len}")
+
+ if len < 2:
+ return []
+
+ ret = []
+ # Fuzz around
+ for i in range(min(len - 2, 2), len + 2):
+ ret += self.crackOne(
ctext,
+ self.cache.get_or_update(
+ ctext,
+ f"xorcrypt::{len}",
- f"xorcrypt::likely_lens",
+ lambda: cipheycore.analyse_bytes(ctext, len),
+ ),
- lambda: cipheycore.xorcrypt_guess_len(ctext),
)
+ return ret
- logger.trace(f"Got possible length {len}")
- if len < 2:
- return []
-
- ret = []
- # Fuzz around
</s>
===========changed ref 2===========
# module: ciphey.basemods.Crackers.xorcrypt
@registry.register
+ class XorCrypt(Cracker[bytes]):
- class XorCrypt(ciphey.iface.Cracker[bytes]):
def attemptCrack(self, ctext: bytes) -> List[CrackResult]:
# offset: 1
<s>
- if len < 2:
- return []
-
- ret = []
- # Fuzz around
- for i in range(min(len - 2, 2), len + 2):
- ret += self.crackOne(
- ctext,
- self.cache.get_or_update(
- ctext,
- f"xorcrypt::{len}",
- lambda: cipheycore.analyse_bytes(ctext, len),
- )
- )
-
- return ret
-
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 4===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 5===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 6===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 7===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 8===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 9===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 10===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 11===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 12===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 13===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
|
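The `ParamSpec` entries in this record declare a module's configurable parameters (description, whether required, default, config reference). A toy analogue of the pattern, with field names copied from the diff (the `NamedTuple` shape and `resolve_params` helper are assumptions, not Ciphey's real interface):

```python
from typing import Any, Dict, List, NamedTuple, Optional


class ParamSpec(NamedTuple):
    desc: str
    req: bool = True
    default: Any = None
    config_ref: Optional[List[str]] = None


def resolve_params(specs: Dict[str, ParamSpec], given: Dict[str, Any]) -> Dict[str, Any]:
    """Merge user-supplied values with declared defaults, enforcing required ones."""
    resolved = {}
    for name, spec in specs.items():
        if name in given:
            resolved[name] = given[name]
        elif spec.req:
            raise ValueError(f"missing required parameter: {name}")
        else:
            resolved[name] = spec.default
    return resolved


specs = {
    "keysize": ParamSpec(desc="A key size that should be used", req=False),
    "p_value": ParamSpec(desc="The p-value for windowed frequency analysis", req=False, default=0.001),
}
assert resolve_params(specs, {})["p_value"] == 0.001
assert resolve_params(specs, {"keysize": 4})["keysize"] == 4
```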
ciphey.basemods.Crackers.xorcrypt/XorCrypt.__init__
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<7>:<add> self.max_key_length = 16
<del> self.MAX_KEY_LENGTH = 16
|
# module: ciphey.basemods.Crackers.xorcrypt
@registry.register
+ class XorCrypt(Cracker[bytes]):
- class XorCrypt(ciphey.iface.Cracker[bytes]):
+ def __init__(self, config: Config):
- def __init__(self, config: ciphey.iface.Config):
<0> super().__init__(config)
<1> self.expected = config.get_resource(self._params()["expected"])
<2> self.cache = config.cache
<3> self.keysize = self._params().get("keysize")
<4> if self.keysize is not None:
<5> self.keysize = int(self.keysize)
<6> self.p_value = self._params()["p_value"]
<7> self.MAX_KEY_LENGTH = 16
<8>
|
===========unchanged ref 0===========
at: ciphey.iface._modules.ConfigurableModule
_params()
===========changed ref 0===========
# module: ciphey.basemods.Crackers.xorcrypt
@registry.register
+ class XorCrypt(Cracker[bytes]):
- class XorCrypt(ciphey.iface.Cracker[bytes]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
+ "expected": ParamSpec(
- "expected": ciphey.iface.ParamSpec(
desc="The expected distribution of the plaintext",
req=False,
config_ref=["default_dist"],
),
+ "keysize": ParamSpec(
- "keysize": ciphey.iface.ParamSpec(
desc="A key size that should be used. If not given, will attempt to work it out",
req=False,
),
+ "p_value": ParamSpec(
- "p_value": ciphey.iface.ParamSpec(
desc="The p-value to use for windowed frequency analysis",
req=False,
default=0.001,
),
}
===========changed ref 1===========
# module: ciphey.basemods.Crackers.xorcrypt
@registry.register
+ class XorCrypt(Cracker[bytes]):
- class XorCrypt(ciphey.iface.Cracker[bytes]):
def getInfo(self, ctext: bytes) -> CrackInfo:
if self.keysize is not None:
analysis = self.cache.get_or_update(
ctext,
f"xorcrypt::{self.keysize}",
lambda: cipheycore.analyse_string(ctext, self.keysize, self.group),
)
return CrackInfo(
success_likelihood=cipheycore.xorcrypt_detect(analysis, self.expected),
# TODO: actually calculate runtimes
success_runtime=1e-4,
failure_runtime=1e-4,
)
keysize = self.cache.get_or_update(
ctext,
+ "xorcrypt::likely_lens",
- f"xorcrypt::likely_lens",
lambda: cipheycore.xorcrypt_guess_len(ctext),
)
if keysize == 1:
return CrackInfo(
success_likelihood=0,
# TODO: actually calculate runtimes
success_runtime=2e-3,
failure_runtime=2e-2,
)
return CrackInfo(
+ success_likelihood=0.9, # Dunno, but it's quite likely
- success_likelihood=0.9, # Dunno, but it's quite likely
# TODO: actually calculate runtimes
success_runtime=2e-3,
failure_runtime=2e-2,
)
===========changed ref 2===========
# module: ciphey.basemods.Crackers.xorcrypt
@registry.register
+ class XorCrypt(Cracker[bytes]):
- class XorCrypt(ciphey.iface.Cracker[bytes]):
def attemptCrack(self, ctext: bytes) -> List[CrackResult]:
logger.debug(f"Trying xorcrypt cipher on {base64.b64encode(ctext)}")
# Analysis must be done here, where we know the case for the cache
if self.keysize is not None:
return self.crackOne(
ctext,
self.cache.get_or_update(
ctext,
f"xorcrypt::{self.keysize}",
lambda: cipheycore.analyse_bytes(ctext, self.keysize),
),
)
+
- else:
+ len = self.cache.get_or_update(
- len = self.cache.get_or_update(
+ ctext,
+ "xorcrypt::likely_lens",
+ lambda: cipheycore.xorcrypt_guess_len(ctext),
+ )
+
+ logger.trace(f"Got possible length {len}")
+
+ if len < 2:
+ return []
+
+ ret = []
+ # Fuzz around
+ for i in range(min(len - 2, 2), len + 2):
+ ret += self.crackOne(
ctext,
+ self.cache.get_or_update(
+ ctext,
+ f"xorcrypt::{len}",
- f"xorcrypt::likely_lens",
+ lambda: cipheycore.analyse_bytes(ctext, len),
+ ),
- lambda: cipheycore.xorcrypt_guess_len(ctext),
)
+ return ret
- logger.trace(f"Got possible length {len}")
- if len < 2:
- return []
-
- ret = []
- # Fuzz around
</s>
===========changed ref 3===========
# module: ciphey.basemods.Crackers.xorcrypt
@registry.register
+ class XorCrypt(Cracker[bytes]):
- class XorCrypt(ciphey.iface.Cracker[bytes]):
def attemptCrack(self, ctext: bytes) -> List[CrackResult]:
# offset: 1
<s>
- if len < 2:
- return []
-
- ret = []
- # Fuzz around
- for i in range(min(len - 2, 2), len + 2):
- ret += self.crackOne(
- ctext,
- self.cache.get_or_update(
- ctext,
- f"xorcrypt::{len}",
- lambda: cipheycore.analyse_bytes(ctext, len),
- )
- )
-
- return ret
-
===========changed ref 4===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 5===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 6===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 7===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 8===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 9===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 10===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 11===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 12===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
|
ciphey.basemods.Checkers.human/HumanChecker.check
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<1>:<add> response = input(f"Result {text.__repr__()} (y/N): ").lower()
<del> response = input(f'Result {text.__repr__()} (y/N): ').lower()
<4>:<add> elif response in ("n", ""):
<del> elif response == "n" or response == "":
|
# module: ciphey.basemods.Checkers.human
@registry.register
class HumanChecker(Checker[str]):
def check(self, text: str) -> Optional[str]:
<0> with self._config().pause_spinner_handle():
<1> response = input(f'Result {text.__repr__()} (y/N): ').lower()
<2> if response == "y":
<3> return ""
<4> elif response == "n" or response == "":
<5> return None
<6> else:
<7> return self.check(text)
<8>
|
===========unchanged ref 0===========
at: ciphey.iface._modules.Checker
check(self, text: T) -> Optional[str]
at: ciphey.iface._modules.ConfigurableModule
_config()
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 2===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 3===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 5===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 6===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 7===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 8===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 9===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 10===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 11===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 12===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 13===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 14===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 15===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 16===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 17===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 18===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 19===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 20===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 21===========
# module: ciphey.basemods.Checkers.gtest
@registry.register
class GTestChecker(Checker[str]):
def check(self, text: T) -> Optional[str]:
+ logger.trace("Trying entropy checker")
- logger.trace(f"Trying entropy checker")
pass
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+ self.translate = config.get_resource(self._params()["dict"], Translation)
+
===========changed ref 23===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def decode(self, ctext: T) -> Optional[U]:
+ for src, dst in self.translate.items():
+ ctext = ctext.replace(src, dst)
+ return ctext
+
===========changed ref 24===========
# module: ciphey.iface._registry
try:
+ from typing import get_args, get_origin
- from typing import get_origin, get_args
except ImportError:
from typing_inspect import get_origin, get_args
===========changed ref 25===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return {
+ "dict": ParamSpec(
+ desc="The leetspeak dictionary to use",
+ req=False,
+ default="cipheydists::translate::leet",
+ )
+ }
+
|
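===========example: interactive confirmation loop===========
The cleaned-up HumanChecker still re-asks via recursion (self.check(text)) on unrecognised input. A minimal iterative sketch with the same assumed semantics (empty string means the plaintext was confirmed, None means it was rejected, and empty input defaults to "no"):

from typing import Optional

def human_check(text: str) -> Optional[str]:
    while True:
        response = input(f"Result {text!r} (y/N): ").lower()
        if response == "y":
            return ""              # confirmed
        if response in ("n", ""):  # default is "no"
            return None
        # anything else: ask again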
ciphey.basemods.Decoders.baudot/Baudot.decode
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<0>:<add> result = ""
<del> ret = ""
<2>:<del> if type(ctext) == str:
<3>:<add> if re.search("^[01]{5}$", ctext.split()[0]):
<del> if re.search("^[01]{5}$", ctext.split()[0]):
<4>:<add> for i in ctext.split():
<del> for i in ctext.split():
<5>:<add> if i == "11011":
<del> if i == "11011":
<6>:<add> switch_to_digit_map = 1
<del> switch_to_digit_map = 1
<7>:<add> if i == "11111":
<del> if i == "11111":
<8>:<add> switch_to_digit_map = 0
<del> switch_to_digit_map = 0
<9>:<add> if switch_to_digit_map == 1:
<del> if switch_to_digit_map == 1:
<10>:<add> result += self.BAUDOT_DICT["+" + i]
<del> ret += self.BAUDOT_DICT["+" + i]
<11>:<add> if switch_to_digit_map == 0:
<del> if switch_to_digit_map == 0:
<12>:<add> result += self.BAUDOT_DICT[i]
<del> ret += self.BAUDOT_DICT[i]
<13>:<add> return result
<del> return ret
|
# module: ciphey.basemods.Decoders.baudot
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Baudot(Decoder[str]):
- class Baudot(Decoder[str, str]):
def decode(self, ctext: T) -> Optional[U]:
<0> ret = ""
<1> switch_to_digit_map = 0
<2> if type(ctext) == str:
<3> if re.search("^[01]{5}$", ctext.split()[0]):
<4> for i in ctext.split():
<5> if i == "11011":
<6> switch_to_digit_map = 1
<7> if i == "11111":
<8> switch_to_digit_map = 0
<9> if switch_to_digit_map == 1:
<10> ret += self.BAUDOT_DICT["+" + i]
<11> if switch_to_digit_map == 0:
<12> ret += self.BAUDOT_DICT[i]
<13> return ret
<14> else:
<15> return None
<16>
|
===========unchanged ref 0===========
at: ciphey.basemods.Decoders.baudot.Baudot.__init__
self.BAUDOT_DICT = config.get_resource(self._params()["dict"], Translation)
at: ciphey.iface._modules
T = TypeVar("T")
U = TypeVar("U")
at: ciphey.iface._modules.Decoder
decode(self, ctext: T) -> Optional[U]
at: re
search(pattern: Pattern[AnyStr], string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]]
search(pattern: AnyStr, string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]]
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 2===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 3===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 5===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 6===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 7===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 8===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 9===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 10===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 11===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 12===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 13===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 14===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 15===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 16===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 17===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 18===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 19===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 20===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 21===========
# module: ciphey.basemods.Checkers.gtest
@registry.register
class GTestChecker(Checker[str]):
def check(self, text: T) -> Optional[str]:
+ logger.trace("Trying entropy checker")
- logger.trace(f"Trying entropy checker")
pass
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+ self.translate = config.get_resource(self._params()["dict"], Translation)
+
===========changed ref 23===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def decode(self, ctext: T) -> Optional[U]:
+ for src, dst in self.translate.items():
+ ctext = ctext.replace(src, dst)
+ return ctext
+
===========changed ref 24===========
# module: ciphey.iface._registry
try:
+ from typing import get_args, get_origin
- from typing import get_origin, get_args
except ImportError:
from typing_inspect import get_origin, get_args
|
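===========example: baudot decoding===========
A self-contained sketch of the Baudot decode loop after the cleanup. The tiny translation table is a stand-in for the cipheydists resource; keys prefixed with "+" are the figures-shift variants, matching the record's BAUDOT_DICT["+" + i] lookup. Unlike the original, this sketch skips the two shift codes themselves instead of passing them through the table.

import re
from typing import Optional

BAUDOT_DICT = {"00011": "A", "11001": "B", "10000": "E",
               "+00011": "-", "+11001": "?", "+10000": "3"}

def baudot_decode(ctext: str) -> Optional[str]:
    codes = ctext.split()
    if not codes or not re.search("^[01]{5}$", codes[0]):
        return None
    result, figures = "", False
    for code in codes:
        if code == "11011":      # FIGS: switch to the figures map
            figures = True
        elif code == "11111":    # LTRS: back to the letters map
            figures = False
        else:
            result += BAUDOT_DICT.get("+" + code if figures else code, "")
    return result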
tests.enciphey/encipher_crypto.MorseCode
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<3>:<add> if m is None:
<del> if m == None:
|
# module: tests.enciphey
class encipher_crypto:
def MorseCode(self, text: str) -> str:
<0> morse = []
<1> for i in text:
<2> m = self.morse_dict.get(i.upper())
<3> if m == None:
<4> m = ""
<5> morse.append(m)
<6>
<7> output = morse
<8> # output = " ".join(MORSE_CODE_DICT.get(i.upper()) for i in text)
<9>
<10> return " ".join(output)
<11>
|
===========unchanged ref 0===========
at: tests.enciphey.encipher_crypto.__init__
self.morse_dict = dict(cipheydists.get_translate("morse"))
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 2===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 3===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 5===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 6===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 7===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 8===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 9===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 10===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 11===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 12===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 13===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 14===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 15===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 16===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 17===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 18===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 19===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 20===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 21===========
# module: ciphey.basemods.Checkers.gtest
@registry.register
class GTestChecker(Checker[str]):
def check(self, text: T) -> Optional[str]:
+ logger.trace("Trying entropy checker")
- logger.trace(f"Trying entropy checker")
pass
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+ self.translate = config.get_resource(self._params()["dict"], Translation)
+
===========changed ref 23===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def decode(self, ctext: T) -> Optional[U]:
+ for src, dst in self.translate.items():
+ ctext = ctext.replace(src, dst)
+ return ctext
+
===========changed ref 24===========
# module: ciphey.iface._registry
try:
+ from typing import get_args, get_origin
- from typing import get_origin, get_args
except ImportError:
from typing_inspect import get_origin, get_args
===========changed ref 25===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return {
+ "dict": ParamSpec(
+ desc="The leetspeak dictionary to use",
+ req=False,
+ default="cipheydists::translate::leet",
+ )
+ }
+
|
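===========example: morse encoding===========
The MorseCode helper is a plain dictionary lookup per character; the cleanup only swaps == None for is None. A runnable sketch with a toy table in place of cipheydists.get_translate("morse"):

MORSE = {"S": "...", "O": "---", "E": "."}

def morse_encode(text: str) -> str:
    out = []
    for ch in text:
        m = MORSE.get(ch.upper())
        if m is None:  # unknown characters become empty entries, as in the record
            m = ""
        out.append(m)
    return " ".join(out)

# morse_encode("sos") == "... --- ..."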
ciphey.basemods.Checkers.brandon/Brandon.clean_text
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<13>:<add> text = self.mh.strip_punctuation(text)
<del> text = self.mh.strip_puncuation(text)
<21>:<add> # poor mans lemmatisation
<del> # poor mans lemisation
<25>:<add> text = self.mh.strip_punctuation(x)
<del> text = self.mh.strip_puncuation(x)
|
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def clean_text(self, text: str) -> set:
<0> """Cleans the text ready to be checked
<1>
<2> Strips punctuation, makes it lower case, turns it into a set separated by spaces, removes duplicate words
<3>
<4> Args:
<5> text -> The text we use to perform analysis on
<6>
<7> Returns:
<8> text -> the text as a list, now cleaned
<9>
<10> """
<11> # makes the text unique words and readable
<12> text = text.lower()
<13> text = self.mh.strip_puncuation(text)
<14> text = text.split(" ")
<15> text = filter(lambda x: len(x) > 2, text)
<16> text = set(text)
<17> return text
<18>
<19> x = []
<20> for word in text:
<21> # poor mans lemisation
<22> # removes 's from the dict'
<23> if word.endswith("'s"):
<24> x.append(word[0:-2])
<25> text = self.mh.strip_puncuation(x)
<26> # turns it all into lowercase and as a set
<27> complete = set([word.lower() for word in x])
<28>
<29> return complete
<30>
|
===========unchanged ref 0===========
at: ciphey.basemods.Checkers.brandon.Brandon
wordlist: set
at: ciphey.basemods.Checkers.brandon.Brandon.__init__
self.mh = mh.mathsHelper()
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 2===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 3===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 5===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 6===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 7===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 8===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 9===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 10===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 11===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 12===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 13===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 14===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 15===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 16===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 17===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 18===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 19===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 20===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 21===========
# module: ciphey.basemods.Checkers.gtest
@registry.register
class GTestChecker(Checker[str]):
def check(self, text: T) -> Optional[str]:
+ logger.trace("Trying entropy checker")
- logger.trace(f"Trying entropy checker")
pass
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+ self.translate = config.get_resource(self._params()["dict"], Translation)
+
===========changed ref 23===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def decode(self, ctext: T) -> Optional[U]:
+ for src, dst in self.translate.items():
+ ctext = ctext.replace(src, dst)
+ return ctext
+
===========changed ref 24===========
# module: ciphey.iface._registry
try:
+ from typing import get_args, get_origin
- from typing import get_origin, get_args
except ImportError:
from typing_inspect import get_origin, get_args
===========changed ref 25===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return {
+ "dict": ParamSpec(
+ desc="The leetspeak dictionary to use",
+ req=False,
+ default="cipheydists::translate::leet",
+ )
+ }
+
|
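===========example: text cleaning===========
clean_text exits at its first return, so the lemmatisation block after it is dead code in the record. A self-contained sketch of the live path, with a stand-in for mathsHelper.strip_punctuation:

import string

def strip_punctuation(text: str) -> str:
    # stand-in for mathsHelper.strip_punctuation
    return text.translate(str.maketrans("", "", string.punctuation))

def clean_text(text: str) -> set:
    text = strip_punctuation(text.lower())
    words = text.split(" ")
    # drop short tokens and duplicates, like the original filter/set pair
    return {w for w in words if len(w) > 2}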
ciphey.basemods.Checkers.brandon/Brandon.checker
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<2>:<add> The checker uses the variable passed to it. I.E. Stopwords list, 1k words, dictionary
<del> The checker uses the vairable passed to it. I.E. Stopwords list, 1k words, dictionary
<8>:<add> var -> the variable we are checking against. Stopwords list, 1k words list, dictionary list.
<del> var -> the variable we are checking against. Stopwords list, 1k words list, dictionray list.
<12>:<add> logger.trace("Checker's text is None, so returning False")
<del> logger.trace(f"Checker's text is None, so returning False")
<15>:<add> logger.trace("Checker's input var is None, so returning False")
<del> logger.trace(f"Checker's input var is None, so returning False")
|
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def checker(self, text: str, threshold: float, text_length: int, var: set) -> bool:
<0> """Given text determine if it passes checker
<1>
<2> The checker uses the vairable passed to it. I.E. Stopwords list, 1k words, dictionary
<3>
<4> Args:
<5> text -> The text to check
<6> threshold -> at what point do we return True? The percentage of text that is in var before we return True
<7> text_length -> the length of the text
<8> var -> the variable we are checking against. Stopwords list, 1k words list, dictionray list.
<9> Returns:
<10> boolean -> True for it passes the test, False for it fails the test."""
<11> if text is None:
<12> logger.trace(f"Checker's text is None, so returning False")
<13> return False
<14> if var is None:
<15> logger.trace(f"Checker's input var is None, so returning False")
<16> return False
<17>
<18> percent = ceil(text_length * threshold)
<19> logger.trace(f"Checker's chunks are size {percent}")
<20> meet_threshold = 0
<21> location = 0
<22> end = percent
<23>
<24> if text_length <= 0:
<25> return False
<26>
<27> while location <= text_length:
<28> # chunks the text, so only gets THRESHOLD chunks of text at a time
<29> text = list(text)
<30> to_analyse = text[location:end]
<31> logger.trace(f"To analyse is {to_analyse}")
<32> for word in to_analyse:
<33> # if word is a stopword, + 1 to the counter
<34> if word in var:
</s>
|
===========below chunk 0===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def checker(self, text: str, threshold: float, text_length: int, var: set) -> bool:
# offset: 1
f"{word} is in var, which means I am +=1 to the meet_threshold which is {meet_threshold}"
)
meet_threshold += 1
meet_threshold_percent = meet_threshold / text_length
if meet_threshold_percent >= threshold:
logger.trace(
f"Returning true since the percentage is {meet_threshold / text_length} and the threshold is {threshold}"
)
# if we meet the threshold, return True
# otherwise, go over again until we do
# We do this in the for loop because if we're at 24% and THRESHOLD is 25
# we don't want to wait THRESHOLD to return true, we want to return True ASAP
return True
location = end
end = end + percent
logger.trace(
f"The language proportion {meet_threshold_percent} is under the threshold {threshold}"
)
return False
===========unchanged ref 0===========
at: math
ceil(x: SupportsFloat, /) -> int
===========changed ref 0===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def clean_text(self, text: str) -> set:
"""Cleans the text ready to be checked
Strips punctuation, makes it lower case, turns it into a set separated by spaces, removes duplicate words
Args:
text -> The text we use to perform analysis on
Returns:
text -> the text as a list, now cleaned
"""
# makes the text unique words and readable
text = text.lower()
+ text = self.mh.strip_punctuation(text)
- text = self.mh.strip_puncuation(text)
text = text.split(" ")
text = filter(lambda x: len(x) > 2, text)
text = set(text)
return text
x = []
for word in text:
+ # poor mans lemmatisation
- # poor mans lemisation
# removes 's from the dict'
if word.endswith("'s"):
x.append(word[0:-2])
+ text = self.mh.strip_punctuation(x)
- text = self.mh.strip_puncuation(x)
# turns it all into lowercase and as a set
complete = set([word.lower() for word in x])
return complete
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 3===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 4===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 5===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 6===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 7===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 8===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 9===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 10===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 11===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 12===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 13===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 14===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 15===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 16===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 17===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
|
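===========example: chunked threshold check===========
A compact sketch of the threshold test, assuming 0 < threshold <= 1: scan windows of ceil(n * threshold) words, count hits against var, and return True as soon as the running ratio reaches the threshold, which is the early exit the record's comments describe.

from math import ceil

def checker(words: list, threshold: float, var: set) -> bool:
    n = len(words)
    if n <= 0 or not var:
        return False
    step = max(1, ceil(n * threshold))
    hits = 0
    for start in range(0, n, step):
        for w in words[start:start + step]:
            if w in var:
                hits += 1
                if hits / n >= threshold:
                    return True  # met the threshold early, stop scanning
    return False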
ciphey.basemods.Checkers.brandon/Brandon.check
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<19>:<del>
|
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def check(self, text: str) -> Optional[str]:
<0> """Checks to see if the text is in English
<1>
<2> Performs a decryption, but mainly parses the internal data packet and prints useful information.
<3>
<4> Args:
<5> text -> The text we use to perform analysis on
<6>
<7> Returns:
<8> bool -> True if the text is English, False otherwise.
<9>
<10> """
<11> logger.trace(f'In Language Checker with "{text}"')
<12> text = self.clean_text(text)
<13> logger.trace(f'Text split to "{text}"')
<14> if text == "":
<15> logger.trace("Returning None from Brandon as the text cleaned is none.")
<16> return None
<17>
<18> length_text = len(text)
<19>
<20>
<21> what_to_use = {}
<22>
<23> # this code decides what checker / threshold to use
<24> # if text is over or equal to maximum size, just use the maximum possible checker
<25> what_to_use = self.calculateWhatChecker(
<26> length_text, self.thresholds_phase1.keys()
<27> )
<28> logger.trace(self.thresholds_phase1)
<29> what_to_use = self.thresholds_phase1[str(what_to_use)]
<30> # def checker(self, text: str, threshold: float, text_length: int, var: set) -> bool:
<31> if "check" in what_to_use:
<32> # perform check 1k words
<33> result = self.checker(
<34> text, what_to_use["check"], length_text, self.top1000Words
<35> )
<36> elif "stop" in what_to_</s>
|
===========below chunk 0===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def check(self, text: str) -> Optional[str]:
# offset: 1
# perform stopwords
result = self.checker(
text, what_to_use["stop"], length_text, self.stopwords
)
elif "dict" in what_to_use:
result = self.checker(text, what_to_use["dict"], length_text, self.wordlist)
# If result is None, no point doing it again in phase2
if not result:
return None
else:
logger.debug(f"It is neither stop or check, but instead {what_to_use}")
# return False if phase 1 fails
if not result:
return None
else:
what_to_use = self.calculateWhatChecker(
length_text, self.thresholds_phase2.keys()
)
what_to_use = self.thresholds_phase2[str(what_to_use)]
result = self.checker(text, what_to_use["dict"], length_text, self.wordlist)
return "" if result else None
===========unchanged ref 0===========
at: ciphey.basemods.Checkers.brandon.Brandon
clean_text(self, text: str) -> set
clean_text(text: str) -> set
checker(self, text: str, threshold: float, text_length: int, var: set) -> bool
checker(text: str, threshold: float, text_length: int, var: set) -> bool
calculateWhatChecker(self, length_text, key)
calculateWhatChecker(length_text, key)
at: ciphey.basemods.Checkers.brandon.Brandon.__init__
self.thresholds_phase1 = phases["1"]
self.thresholds_phase2 = phases["2"]
self.top1000Words = config.get_resource(self._params().get("top1000"))
self.wordlist = config.get_resource(self._params()["wordlist"])
self.stopwords = config.get_resource(self._params().get("stopwords"))
at: ciphey.iface._modules.Checker
check(self, text: T) -> Optional[str]
===========changed ref 0===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def clean_text(self, text: str) -> set:
"""Cleans the text ready to be checked
Strips punctuation, makes it lower case, turns it into a set separated by spaces, removes duplicate words
Args:
text -> The text we use to perform analysis on
Returns:
text -> the text as a list, now cleaned
"""
# makes the text unique words and readable
text = text.lower()
+ text = self.mh.strip_punctuation(text)
- text = self.mh.strip_puncuation(text)
text = text.split(" ")
text = filter(lambda x: len(x) > 2, text)
text = set(text)
return text
x = []
for word in text:
+ # poor mans lemmatisation
- # poor mans lemisation
# removes 's from the dict'
if word.endswith("'s"):
x.append(word[0:-2])
+ text = self.mh.strip_punctuation(x)
- text = self.mh.strip_puncuation(x)
# turns it all into lowercase and as a set
complete = set([word.lower() for word in x])
return complete
===========changed ref 1===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def checker(self, text: str, threshold: float, text_length: int, var: set) -> bool:
"""Given text determine if it passes checker
+ The checker uses the variable passed to it. I.E. Stopwords list, 1k words, dictionary
- The checker uses the vairable passed to it. I.E. Stopwords list, 1k words, dictionary
Args:
text -> The text to check
threshold -> at what point do we return True? The percentage of text that is in var before we return True
text_length -> the length of the text
+ var -> the variable we are checking against. Stopwords list, 1k words list, dictionary list.
- var -> the variable we are checking against. Stopwords list, 1k words list, dictionray list.
Returns:
boolean -> True for it passes the test, False for it fails the test."""
if text is None:
+ logger.trace("Checker's text is None, so returning False")
- logger.trace(f"Checker's text is None, so returning False")
return False
if var is None:
+ logger.trace("Checker's input var is None, so returning False")
- logger.trace(f"Checker's input var is None, so returning False")
return False
percent = ceil(text_length * threshold)
logger.trace(f"Checker's chunks are size {percent}")
meet_threshold = 0
location = 0
end = percent
if text_length <= 0:
return False
while location <= text_length:
# chunks the text, so only gets THRESHOLD chunks of text at a time
text = list(text)
to_analyse = text[location:end]
</s>
===========changed ref 2===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def checker(self, text: str, threshold: float, text_length: int, var: set) -> bool:
# offset: 1
<s> of text at a time
text = list(text)
to_analyse = text[location:end]
logger.trace(f"To analyse is {to_analyse}")
for word in to_analyse:
# if word is a stopword, + 1 to the counter
if word in var:
logger.trace(
f"{word} is in var, which means I am +=1 to the meet_threshold which is {meet_threshold}"
)
meet_threshold += 1
meet_threshold_percent = meet_threshold / text_length
if meet_threshold_percent >= threshold:
logger.trace(
f"Returning true since the percentage is {meet_threshold / text_length} and the threshold is {threshold}"
)
# if we meet the threshold, return True
# otherwise, go over again until we do
# We do this in the for loop because if we're at 24% and THRESHOLD is 25
# we don't want to wait THRESHOLD to return true, we want to return True ASAP
return True
location = end
end = end + percent
logger.trace(
f"The language proportion {meet_threshold_percent} is under the threshold {threshold}"
)
return False
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
|
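===========example: two-phase language check===========
A flattened sketch of the two-phase idea, without chunking or the per-length threshold tables: phase 1 is a cheap screen against the 1000-word list, and only survivors are re-checked against the full dictionary in phase 2. The 0.55/0.45 thresholds are illustrative, not Ciphey's tuned values.

def is_english(words: set, top1000: set, wordlist: set,
               check_threshold: float = 0.55,
               dict_threshold: float = 0.45) -> bool:
    n = len(words)
    if n == 0:
        return False
    if sum(w in top1000 for w in words) / n < check_threshold:
        return False  # phase 1 failed, skip phase 2
    return sum(w in wordlist for w in words) / n >= dict_threshold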
ciphey.basemods.Checkers.brandon/Brandon.calculateWhatChecker
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<5>:<add> In this way, we find the absolute lowest checker / percentage threshold.
<del> In this way, we find the absolute lowest checker / percentage threshold.
|
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def calculateWhatChecker(self, length_text, key):
<0> """Calculates what threshold / checker to use
<1>
<2> If the length of the text is over the maximum sentence length, use the last checker / threshold
<3> Otherwise, traverse the keys backwards until we find a key range that does not fit.
<4> So we traverse backwards and see if the sentence length is between current - 1 and current
<5> In this way, we find the absolute lowest checker / percentage threshold.
<6> We traverse backwards because if the text is longer than the max sentence length, we already know.
<7> In total, the keys are only 5 items long or so. It is not expensive to move backwards, nor is it expensive to move forwards.
<8>
<9> Args:
<10> length_text -> The length of the text
<11> key -> What key we want to use. I.E. Phase1 keys, Phase2 keys.
<12> Returns:
<13> what_to_use -> the key of the lowest checker."""
<14>
<15> _keys = list(key)
<16> _keys = list(map(int, _keys))
<17> if length_text >= int(_keys[-1]):
<18> what_to_use = list(key)[_keys.index(_keys[-1])]
<19> else:
<20> # this algorithm finds the smallest possible fit for the text
<21> for counter, i in reversed(list(enumerate(_keys))):
<22> # [0, 110, 150]
<23> if i <= length_text:
<24> what_to_use = i
<25> return what_to_use
<26>
|
===========changed ref 0===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def clean_text(self, text: str) -> set:
"""Cleans the text ready to be checked
Strips punctuation, makes it lower case, turns it into a set separated by spaces, removes duplicate words
Args:
text -> The text we use to perform analysis on
Returns:
text -> the text as a list, now cleaned
"""
# makes the text unique words and readable
text = text.lower()
+ text = self.mh.strip_punctuation(text)
- text = self.mh.strip_puncuation(text)
text = text.split(" ")
text = filter(lambda x: len(x) > 2, text)
text = set(text)
return text
x = []
for word in text:
+ # poor mans lemmatisation
- # poor mans lemisation
# removes 's from the dict'
if word.endswith("'s"):
x.append(word[0:-2])
+ text = self.mh.strip_punctuation(x)
- text = self.mh.strip_puncuation(x)
# turns it all into lowercase and as a set
complete = set([word.lower() for word in x])
return complete
===========changed ref 1===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def check(self, text: str) -> Optional[str]:
"""Checks to see if the text is in English
Performs a decryption, but mainly parses the internal data packet and prints useful information.
Args:
text -> The text we use to perform analysis on
Returns:
bool -> True if the text is English, False otherwise.
"""
logger.trace(f'In Language Checker with "{text}"')
text = self.clean_text(text)
logger.trace(f'Text split to "{text}"')
if text == "":
logger.trace("Returning None from Brandon as the text cleaned is none.")
return None
length_text = len(text)
-
what_to_use = {}
# this code decides what checker / threshold to use
# if text is over or equal to maximum size, just use the maximum possible checker
what_to_use = self.calculateWhatChecker(
length_text, self.thresholds_phase1.keys()
)
logger.trace(self.thresholds_phase1)
what_to_use = self.thresholds_phase1[str(what_to_use)]
# def checker(self, text: str, threshold: float, text_length: int, var: set) -> bool:
if "check" in what_to_use:
# perform check 1k words
result = self.checker(
text, what_to_use["check"], length_text, self.top1000Words
)
elif "stop" in what_to_use:
# perform stopwords
result = self.checker(
text, what_to_use["stop"], length_text,</s>
===========changed ref 2===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def check(self, text: str) -> Optional[str]:
# offset: 1
<s> perform stopwords
result = self.checker(
text, what_to_use["stop"], length_text, self.stopwords
)
elif "dict" in what_to_use:
result = self.checker(text, what_to_use["dict"], length_text, self.wordlist)
# If result is None, no point doing it again in phase2
if not result:
return None
else:
logger.debug(f"It is neither stop or check, but instead {what_to_use}")
# return False if phase 1 fails
if not result:
return None
else:
what_to_use = self.calculateWhatChecker(
length_text, self.thresholds_phase2.keys()
)
what_to_use = self.thresholds_phase2[str(what_to_use)]
result = self.checker(text, what_to_use["dict"], length_text, self.wordlist)
return "" if result else None
===========changed ref 3===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def checker(self, text: str, threshold: float, text_length: int, var: set) -> bool:
"""Given text determine if it passes checker
+ The checker uses the variable passed to it. I.E. Stopwords list, 1k words, dictionary
- The checker uses the vairable passed to it. I.E. Stopwords list, 1k words, dictionary
Args:
text -> The text to check
threshold -> at what point do we return True? The percentage of text that is in var before we return True
text_length -> the length of the text
+ var -> the variable we are checking against. Stopwords list, 1k words list, dictionary list.
- var -> the variable we are checking against. Stopwords list, 1k words list, dictionray list.
Returns:
boolean -> True for it passes the test, False for it fails the test."""
if text is None:
+ logger.trace("Checker's text is None, so returning False")
- logger.trace(f"Checker's text is None, so returning False")
return False
if var is None:
+ logger.trace("Checker's input var is None, so returning False")
- logger.trace(f"Checker's input var is None, so returning False")
return False
percent = ceil(text_length * threshold)
logger.trace(f"Checker's chunks are size {percent}")
meet_threshold = 0
location = 0
end = percent
if text_length <= 0:
return False
while location <= text_length:
# chunks the text, so only gets THRESHOLD chunks of text at a time
text = list(text)
to_analyse = text[location:end]
</s>
|
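===========example: threshold key selection===========
The backwards loop in the record assigns what_to_use without breaking, so it ends on the smallest qualifying key rather than the tightest fit the docstring describes; whether that is intended is unclear. The sketch below implements the tightest-fit reading (largest table key not exceeding the text length), which is one interpretation, not a verbatim port.

def pick_threshold_key(length_text: int, keys) -> int:
    int_keys = sorted(int(k) for k in keys)
    if length_text >= int_keys[-1]:
        return int_keys[-1]
    chosen = int_keys[0]
    for k in int_keys:
        if k <= length_text:
            chosen = k  # keep the largest key that still fits
    return chosen

# pick_threshold_key(120, ["0", "110", "150"]) == 110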
ciphey.basemods.Checkers.brandon/Brandon.getParams
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<1>:<add> "top1000": ParamSpec(
<del> "top1000": ciphey.iface.ParamSpec(
<6>:<add> "wordlist": ParamSpec(
<del> "wordlist": ciphey.iface.ParamSpec(
<11>:<add> "stopwords": ParamSpec(
<del> "stopwords": ciphey.iface.ParamSpec(
<16>:<add> "threshold": ParamSpec(
<del> "threshold": ciphey.iface.ParamSpec(
<21>:<add> "phases": ParamSpec(
<del> "phases": ciphey.iface.ParamSpec(
|
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, ciphey.iface.ParamSpec]]:
<0> return {
<1> "top1000": ciphey.iface.ParamSpec(
<2> desc="A wordlist of the top 1000 words",
<3> req=False,
<4> default="cipheydists::list::english1000",
<5> ),
<6> "wordlist": ciphey.iface.ParamSpec(
<7> desc="A wordlist of all the words",
<8> req=False,
<9> default="cipheydists::list::english",
<10> ),
<11> "stopwords": ciphey.iface.ParamSpec(
<12> desc="A wordlist of StopWords",
<13> req=False,
<14> default="cipheydists::list::englishStopWords",
<15> ),
<16> "threshold": ciphey.iface.ParamSpec(
<17> desc="The minimum proportion (between 0 and 1) that must be in the dictionary",
<18> req=False,
<19> default=0.45,
<20> ),
<21> "phases": ciphey.iface.ParamSpec(
<22> desc="Language-specific phase thresholds",
<23> req=False,
<24> default="cipheydists::brandon::english",
<25> ),
<26> }
<27>
|
===========unchanged ref 0===========
at: ciphey.iface._modules
ParamSpec(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
at: ciphey.iface._modules.ConfigurableModule
getParams() -> Optional[Dict[str, ParamSpec]]
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def calculateWhatChecker(self, length_text, key):
"""Calculates what threshold / checker to use
If the length of the text is over the maximum sentence length, use the last checker / threshold
Otherwise, traverse the keys backwards until we find a key range that does not fit.
So we traverse backwards and see if the sentence length is between current - 1 and current
+ In this way, we find the absolute lowest checker / percentage threshold.
- In this way, we find the absolute lowest checker / percentage threshold.
We traverse backwards because if the text is longer than the max sentence length, we already know.
In total, the keys are only 5 items long or so. It is not expensive to move backwards, nor is it expensive to move forwards.
Args:
length_text -> The length of the text
key -> What key we want to use. I.E. Phase1 keys, Phase2 keys.
Returns:
what_to_use -> the key of the lowest checker."""
_keys = list(key)
_keys = list(map(int, _keys))
if length_text >= int(_keys[-1]):
what_to_use = list(key)[_keys.index(_keys[-1])]
else:
# this algorithm finds the smallest possible fit for the text
for counter, i in reversed(list(enumerate(_keys))):
# [0, 110, 150]
if i <= length_text:
what_to_use = i
return what_to_use
===========changed ref 1===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def clean_text(self, text: str) -> set:
"""Cleans the text ready to be checked
Strips punctuation, makes it lower case, turns it into a set separated by spaces, removes duplicate words
Args:
text -> The text we use to perform analysis on
Returns:
text -> the text as a list, now cleaned
"""
# makes the text unique words and readable
text = text.lower()
+ text = self.mh.strip_punctuation(text)
- text = self.mh.strip_puncuation(text)
text = text.split(" ")
text = filter(lambda x: len(x) > 2, text)
text = set(text)
return text
x = []
for word in text:
+ # poor mans lemmatisation
- # poor mans lemisation
# removes 's from the dict'
if word.endswith("'s"):
x.append(word[0:-2])
+ text = self.mh.strip_punctuation(x)
- text = self.mh.strip_puncuation(x)
# turns it all into lowercase and as a set
complete = set([word.lower() for word in x])
return complete
===========changed ref 2===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def check(self, text: str) -> Optional[str]:
"""Checks to see if the text is in English
Performs a decryption, but mainly parses the internal data packet and prints useful information.
Args:
text -> The text we use to perform analysis on
Returns:
bool -> True if the text is English, False otherwise.
"""
logger.trace(f'In Language Checker with "{text}"')
text = self.clean_text(text)
logger.trace(f'Text split to "{text}"')
if text == "":
logger.trace("Returning None from Brandon as the text cleaned is none.")
return None
length_text = len(text)
-
what_to_use = {}
# this code decides what checker / threshold to use
# if text is over or equal to maximum size, just use the maximum possible checker
what_to_use = self.calculateWhatChecker(
length_text, self.thresholds_phase1.keys()
)
logger.trace(self.thresholds_phase1)
what_to_use = self.thresholds_phase1[str(what_to_use)]
# def checker(self, text: str, threshold: float, text_length: int, var: set) -> bool:
if "check" in what_to_use:
# perform check 1k words
result = self.checker(
text, what_to_use["check"], length_text, self.top1000Words
)
elif "stop" in what_to_use:
# perform stopwords
result = self.checker(
text, what_to_use["stop"], length_text,</s>
===========changed ref 3===========
# module: ciphey.basemods.Checkers.brandon
@registry.register
+ class Brandon(Checker[str]):
- class Brandon(ciphey.iface.Checker[str]):
def check(self, text: str) -> Optional[str]:
# offset: 1
<s> perform stopwords
result = self.checker(
text, what_to_use["stop"], length_text, self.stopwords
)
elif "dict" in what_to_use:
result = self.checker(text, what_to_use["dict"], length_text, self.wordlist)
# If result is None, no point doing it again in phase2
if not result:
return None
else:
logger.debug(f"It is neither stop or check, but instead {what_to_use}")
# return False if phase 1 fails
if not result:
return None
else:
what_to_use = self.calculateWhatChecker(
length_text, self.thresholds_phase2.keys()
)
what_to_use = self.thresholds_phase2[str(what_to_use)]
result = self.checker(text, what_to_use["dict"], length_text, self.wordlist)
return "" if result else None
|
ciphey.basemods.Checkers.format/JsonChecker.check
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<0>:<add> logger.trace("Trying json checker")
<del> logger.trace(f"Trying json checker")
|
# module: ciphey.basemods.Checkers.format
@registry.register
class JsonChecker(Checker[str]):
def check(self, text: T) -> Optional[str]:
<0> logger.trace(f"Trying json checker")
<1>
<2> # https://github.com/Ciphey/Ciphey/issues/389
<3> if text.isdigit():
<4> return None
<5>
<6> try:
<7> json.loads(text)
<8> return ""
<9> except ValueError:
<10> return None
<11>
|
===========unchanged ref 0===========
at: ciphey.iface._modules
T = TypeVar("T")
at: ciphey.iface._modules.Checker
check(self, text: T) -> Optional[str]
at: json
loads(s: Union[str, bytes], *, cls: Optional[Type[JSONDecoder]]=..., object_hook: Optional[Callable[[Dict[Any, Any]], Any]]=..., parse_float: Optional[Callable[[str], Any]]=..., parse_int: Optional[Callable[[str], Any]]=..., parse_constant: Optional[Callable[[str], Any]]=..., object_pairs_hook: Optional[Callable[[List[Tuple[Any, Any]]], Any]]=..., **kwds: Any) -> Any
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 5===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 6===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 7===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 8===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 9===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 10===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 11===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 12===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 13===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 14===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 15===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 16===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 17===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 18===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 19===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 20===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 21===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "base58_bitcoin"
+
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 23===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 24===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 25===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ # Not expected to show up often, but also very fast to check.
+ return 0.05
+
|
ciphey.basemods.Decoders.url/Url.decode
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<3>:<add> logger.trace("Attempting URL")
<add> result = ""
<6>:<add> logger.debug(f"URL successful, returning '{result}'")
<10>:<add> logger.trace("Failed to decode URL")
|
# module: ciphey.basemods.Decoders.url
@registry.register
+ class Url(Decoder[str]):
- class Url(Decoder[str, str]):
def decode(self, ctext: T) -> Optional[U]:
<0> """
<1> Performs URL decoding
<2> """
<3> try:
<4> result = unquote_plus(ctext, errors="strict")
<5> if result != ctext:
<6> return result
<7> else:
<8> return None
<9> except Exception:
<10> return None
<11>
|
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 5===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 6===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 7===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 8===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 9===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 10===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 11===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 12===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 13===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 14===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 15===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 16===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 17===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 18===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 19===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 20===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 21===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "base58_bitcoin"
+
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 23===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 24===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 25===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ # Not expected to show up often, but also very fast to check.
+ return 0.05
+
===========changed ref 26===========
# module: ciphey.basemods.Checkers.gtest
@registry.register
class GTestChecker(Checker[str]):
def check(self, text: T) -> Optional[str]:
+ logger.trace("Trying entropy checker")
- logger.trace(f"Trying entropy checker")
pass
===========changed ref 27===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+ self.translate = config.get_resource(self._params()["dict"], Translation)
+
|
ciphey.basemods.Decoders.url/Url.getParams
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<0>:<add> return None
<del> pass
|
# module: ciphey.basemods.Decoders.url
@registry.register
+ class Url(Decoder[str]):
- class Url(Decoder[str, str]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
<0> pass
<1>
|
===========changed ref 0===========
# module: ciphey.basemods.Decoders.url
@registry.register
+ class Url(Decoder[str]):
- class Url(Decoder[str, str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs URL decoding
"""
+ logger.trace("Attempting URL")
+ result = ""
try:
result = unquote_plus(ctext, errors="strict")
if result != ctext:
+ logger.debug(f"URL successful, returning '{result}'")
return result
else:
return None
except Exception:
+ logger.trace("Failed to decode URL")
return None
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 4===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 5===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 6===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 7===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 8===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 9===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 10===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 11===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 12===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 13===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 14===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 15===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 16===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 17===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 18===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 19===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 20===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 21===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "base58_bitcoin"
+
===========changed ref 23===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 24===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 25===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 26===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ # Not expected to show up often, but also very fast to check.
+ return 0.05
+
|
tests.generate_tests/test_generator.make_test_lc_true_template
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<8>:<add> assert result["IsPlaintext?"] == True
<del> assert result["IsPlaintext?"] == True
|
# module: tests.generate_tests
class test_generator:
def make_test_lc_true_template(self, cipher):
<0> id = self.randomString(8)
<1> return f"""
<2> def test_{cipher['Encrypted Texts']['CipherUsed']}_{id}():
<3> # {cipher}
<4> cfg = make_default_config('''{cipher['Encrypted Texts']['EncryptedText']}''')
<5> cfg["debug"] = "TRACE"
<6> result = main(cfg)
<7>
<8> assert result["IsPlaintext?"] == True
<9> """
<10>
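Since the method is an f-string template, its output is easiest to see by expanding it on a sample record (the record and the 8-character id below are illustrative stand-ins):

cipher = {"Encrypted Texts": {"CipherUsed": "caesar", "EncryptedText": "Uryyb"}}
id = "aBcDeFgH"  # stand-in for self.randomString(8)
print(f"""
def test_{cipher['Encrypted Texts']['CipherUsed']}_{id}():
    # {cipher}
    cfg = make_default_config('''{cipher['Encrypted Texts']['EncryptedText']}''')
    cfg["debug"] = "TRACE"
    result = main(cfg)

    assert result["IsPlaintext?"] == True
""")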
|
===========unchanged ref 0===========
at: tests.generate_tests.test_generator
randomString(stringLength)
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 5===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 6===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 7===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 8===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 9===========
# module: ciphey.basemods.Decoders.url
@registry.register
+ class Url(Decoder[str]):
- class Url(Decoder[str, str]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 10===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 11===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 12===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 13===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 14===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 15===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 16===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 17===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 18===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 19===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 20===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 21===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "base58_bitcoin"
+
===========changed ref 23===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 24===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 25===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 26===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ # Not expected to show up often, but also very fast to check.
+ return 0.05
+
===========changed ref 27===========
# module: ciphey.basemods.Checkers.gtest
@registry.register
class GTestChecker(Checker[str]):
def check(self, text: T) -> Optional[str]:
+ logger.trace("Trying entropy checker")
- logger.trace(f"Trying entropy checker")
pass
|
tests.lukas/XY_encrypt.encrypt
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<10>:<add> self.ctext = self.randomizer() if self.randomize is True else self.ctext
<del> self.ctext = self.randomizer() if self.randomize == True else self.ctext
|
# module: tests.lukas
class XY_encrypt:
def encrypt(self):
<0> self.ctext = self.to_binary().replace(" ", "")
<1>
<2> if self.key:
<3> one, two = self.key[0], self.key[1]
<4> else:
<5> one, two = random.choice(self.ASCII), random.choice(self.ASCII)
<6>
<7> self.ctext = self.ctext.replace(str(int(self.flip)), one).replace(
<8> str(int(not self.flip)), two
<9> )
<10> self.ctext = self.randomizer() if self.randomize == True else self.ctext
<11>
<12> return self.ctext
<13>
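A toy version (hypothetical; key, flip and randomize handling omitted) of the substitution this method performs: encode each character as 8-bit binary, then map the two bit symbols to arbitrary characters:

def xy_encrypt(ptext: str, one: str = "x", two: str = "D") -> str:
    bits = "".join(format(ord(c), "08b") for c in ptext)
    return bits.replace("0", one).replace("1", two)

print(xy_encrypt("Hi"))  # xDxxDxxxxDDxDxxD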
|
===========unchanged ref 0===========
at: random
choice = _inst.choice
at: tests.lukas.XY_encrypt
randomizer()
to_binary()
at: tests.lukas.XY_encrypt.__init__
self.ASCII = cipheydists.get_charset("asciiTable")
self.ctext = ""
self.flip = flip
self.randomize = randomize
self.key = key
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 5===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 6===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 7===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 8===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 9===========
# module: ciphey.basemods.Decoders.url
@registry.register
+ class Url(Decoder[str]):
- class Url(Decoder[str, str]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 10===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 11===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 12===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 13===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 14===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 15===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 16===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 17===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 18===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 19===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 20===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 21===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "base58_bitcoin"
+
===========changed ref 23===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 24===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 25===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 26===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ # Not expected to show up often, but also very fast to check.
+ return 0.05
+
|
tests.test_main/test_plaintext
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<0>:<add> res = decrypt(Config().library_default().complete_config(), answer_str)
<del> res = decrypt(Config.library_default().complete_config(), answer_str)
<1>:<del>
<2>:<del> print(res)
<3>:<del>
|
# module: tests.test_main
def test_plaintext():
<0> res = decrypt(Config.library_default().complete_config(), answer_str)
<1>
<2> print(res)
<3>
<4> assert res == answer_str
<5>
|
===========unchanged ref 0===========
at: ciphey.ciphey
decrypt(config: iface.Config, ctext: Any) -> Union[str, bytes]
at: ciphey.iface._config
Config()
at: ciphey.iface._config.Config
complete_config() -> "Config"
library_default()
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 5===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 6===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 7===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 8===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 9===========
# module: ciphey.basemods.Decoders.url
@registry.register
+ class Url(Decoder[str]):
- class Url(Decoder[str, str]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 10===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 11===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 12===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 13===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 14===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 15===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 16===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 17===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 18===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 19===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 20===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 21===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "base58_bitcoin"
+
===========changed ref 23===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 24===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 25===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 26===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ # Not expected to show up often, but also very fast to check.
+ return 0.05
+
===========changed ref 27===========
# module: ciphey.basemods.Checkers.gtest
@registry.register
class GTestChecker(Checker[str]):
def check(self, text: T) -> Optional[str]:
+ logger.trace("Trying entropy checker")
- logger.trace(f"Trying entropy checker")
pass
|
tests.test_main/test_base69
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<2>:<add> "kAZAtABBeB8A-AoB8ADBNAhBLA1AFBgA0AXBfBGATAVAFBgAwAWBHB<ACAkA-AnB0AVBnBNBDARAZBiBQAYAtAhBhABA<ArB4AbAMANBDAFAXBfBQAdAOAmArAUAAA2=",
<del> "0110100001100101011011000110110001101111",
<4>:<add> assert res == answer_str
<del> assert res != "0110100001100101011011000110110001101111"
|
# module: tests.test_main
def test_base69():
<0> res = decrypt(
<1> Config().library_default().complete_config(),
<2> "0110100001100101011011000110110001101111",
<3> )
<4> assert res != "0110100001100101011011000110110001101111"
<5>
|
===========unchanged ref 0===========
at: tests.test_main.test_json_problem
res = decrypt(
Config().library_default().complete_config(),
"0110100001100101011011000110110001101111",
)
===========changed ref 0===========
# module: tests.test_main
+ def test_json_problem():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "0110100001100101011011000110110001101111",
+ )
+ assert res != "0110100001100101011011000110110001101111"
+
===========changed ref 1===========
# module: tests.test_main
+ def test_hexadecimal():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "48 65 6c 6c 6f 20 6d 79 20 6e 61 6d 65 20 69 73 20 62 65 65 20 61 6e 64 20 49 20 6c 69 6b 65 20 64 6f 67 20 61 6e 64 20 61 70 70 6c 65 20 61 6e 64 20 74 72 65 65",
+ )
+
+ assert res.lower() == answer_str.lower()
+
===========changed ref 2===========
# module: tests.test_main
- def leet():
- res = decrypt(
- Config().library_default().complete_config(),
- "|-|3770 my nam3 is 833 and 1 lIke D06 AND 4|>|>13 4 7R33",
- )
- assert res.lower() == answer_str
-
===========changed ref 3===========
# module: tests.test_main
- def test_base58_normal():
- res = decrypt(
- Config().library_default().complete_config(),
- "6qYhNwsP46Mn4gy6gyANfsMm2icAxGFA6gnFjVm9phYHeby7PZm3vthiXxSU77teQgTFGbHETn",
- )
- assert res.lower() == answer_str.lower()
-
===========changed ref 4===========
# module: tests.test_main
+ def test_base85():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "87cURD]inB+DtV)AKY].+C\\nn+CT.u+A!\\lBkq9&A8c*'@;]Tu@;p1%AKYE!A0>u7ARt",
+ )
+ assert res.lower() == answer_str.lower()
+
===========changed ref 5===========
# module: tests.test_main
- def test_hex():
- res = decrypt(
- Config().library_default().complete_config(),
- "48656c6c6f206d79206e616d652069732062656520616e642049206c696b6520646f6720616e64206170706c6520616e6420"
- "74726565",
- )
-
- assert res == answer_str
-
===========changed ref 6===========
# module: tests.test_main
+ def test_base58_bitcoin():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "6qYhNwsP46Mn4gy6gyANfsMm2icAxGFA6gnFjVm9phYHeby7PZm3vthiXxSU77teQgTFGbHETn",
+ )
+ assert res.lower() == answer_str.lower()
+
===========changed ref 7===========
# module: tests.test_main
def test_plaintext():
+ res = decrypt(Config().library_default().complete_config(), answer_str)
- res = decrypt(Config.library_default().complete_config(), answer_str)
-
- print(res)
-
assert res == answer_str
===========changed ref 8===========
# module: tests.test_main
+ def test_base32():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "JBSWY3DPEBWXSIDOMFWWKIDJOMQGEZLFEBQW4ZBAJEQGY2LLMUQGI33HEBQW4ZBAMFYHA3DFEBQW4ZBAORZGKZI=",
+ )
+ assert res.lower() == answer_str.lower()
+
===========changed ref 9===========
# module: tests.test_main
- def test_XandY():
- res = decrypt(
- Config().library_default().complete_config(),
- "xDDxDxxx xDDxxDxD xDDxDDxx xDDxDDxx xDDxDDDD xxDxxxxx xDDxDDxD xDDDDxxD xxDxxxxx xDDxDDDx xDDxxxxD xDDxDDxD xDDxxDxD xxDxxxxx xDDxDxxD xDDDxxDD xxDxxxxx xDDxxxDx xDDxxDxD xDDxxDxD xxDxxxxx xDDxxxxD xDDxDDDx xDDxxDxx xxDxxxxx xDxxDxxD xxDxxxxx xDDxDDxx xDDxDxxD xDDxDxDD xDDxxDxD xxDxxxxx xDDxxDxx xDDxDDDD xDDxxDDD xxDxxxxx xDDxxxxD xDDxDDDx xDDxxDxx xxDxxxxx xDDxxxxD xDDDxxxx xDDDxxxx xDDxDDxx xDDxxDxD xxDxxxxx xDDxxxxD xDDxDDDx xDDxxDxx xxDxxxxx xDDDxDxx xDDDxxDx xDDxxDxD xDDxxDxD",
- )
- assert res.lower() == answer_str.lower()
-
===========changed ref 10===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+
+
===========changed ref 11===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 12===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 13===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 14===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 15===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 16===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 17===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 18===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 19===========
# module: ciphey.basemods.Decoders.url
@registry.register
+ class Url(Decoder[str]):
- class Url(Decoder[str, str]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
|
tests.test_main/test_brandon
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
# module: tests.test_main
def test_brandon():
<0> res = decrypt(
<1> Config().library_default().complete_config(),
<2> "R hvv blf tzgsvi yvuliv nv...sfmtib...gviirurvw... Xofgxsrmt blfi yzyvh gl blfi yivzhg. Vnkvili Vnsbi srh nzixsvw srh ovtrlmh rmgl lfi ozmwh... Ozrw hrvtv gl vevib uligivhh uiln sviv gl gsv Yofv Nlfmgzrmh. Izyrw zmw izevmlfh, sv yrgvh zmw yrgvh zdzb. Nvm lu gsv Mligs, blf hgzmw zg gsv kivxrkrxv. Blfi prmth szev uzrovw blf, hl mld blf gfim gl gsv tlwh! Zmw bvg blf wl mlg kovzw? Blf wl mlg pmvvo gl wfhg blfi svzwh drgs zhs? Rmhgvzw blf dzro, Dsb szev gsv tlwh ulihzpvm fh? Dv nfhg ollp rmgl gsv girzoh dv uzrovw olmt ztl! Rm z grnv kzhhvw, lfi dliow rmgvigdrmvw drgs zmlgsvi gsilfts zm fksvzezo hxslozih xzoo gsv Xlmqfmxgrlm lu gsv Hksvivh... Gsv tlwh zooldvw fmslob ulixvh gl hork rmgl lfi wlnzrm. Gsv luuhkirmt lu gszg xzgzxobhn dzh gsv mvuvirlfh ulixv xzoovw nztrx... Bvg dv wrw mlg yzmrhs rg, rmhgvzw hgfwbrmt gsv erov zixzmv uli lfi kldvi zmw dvzogs! Zmw gsv nlmhgvih zg lfi wlli...gsv</s>
|
===========below chunk 0===========
# module: tests.test_main
def test_brandon():
# offset: 1
)
assert True
===========unchanged ref 0===========
at: ciphey.ciphey
decrypt(config: iface.Config, ctext: Any) -> Union[str, bytes]
at: ciphey.iface._config
Config()
at: ciphey.iface._config.Config
complete_config() -> "Config"
library_default()
===========changed ref 0===========
# module: tests.test_main
+ def test_json_problem():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "0110100001100101011011000110110001101111",
+ )
+ assert res != "0110100001100101011011000110110001101111"
+
===========changed ref 1===========
# module: tests.test_main
+ def test_leetspeak():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "|-|3ll0 my n4m3 1s 833 4nd 1 l1k3 D06 4ND 4ppl3 4nd 7R33",
+ )
+ assert res.lower() == answer_str.lower()
+
===========changed ref 2===========
# module: tests.test_main
+ def test_morse_code():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ ".... . .-.. .-.. ---/-- -.--/-. .- -- ./.. .../-... . ./.- -. -../../.-.. .. -.- ./-.. --- --./.- -. -../.- .--. .--. .-.. ./.- -. -../- .-. . .",
+ )
+ assert res == answer_str.upper()
+
===========changed ref 3===========
# module: tests.test_main
+ def test_hexadecimal():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "48 65 6c 6c 6f 20 6d 79 20 6e 61 6d 65 20 69 73 20 62 65 65 20 61 6e 64 20 49 20 6c 69 6b 65 20 64 6f 67 20 61 6e 64 20 61 70 70 6c 65 20 61 6e 64 20 74 72 65 65",
+ )
+
+ assert res.lower() == answer_str.lower()
+
===========changed ref 4===========
# module: tests.test_main
- def leet():
- res = decrypt(
- Config().library_default().complete_config(),
- "|-|3770 my nam3 is 833 and 1 lIke D06 AND 4|>|>13 4 7R33",
- )
- assert res.lower() == answer_str
-
===========changed ref 5===========
# module: tests.test_main
- def test_base58_normal():
- res = decrypt(
- Config().library_default().complete_config(),
- "6qYhNwsP46Mn4gy6gyANfsMm2icAxGFA6gnFjVm9phYHeby7PZm3vthiXxSU77teQgTFGbHETn",
- )
- assert res.lower() == answer_str.lower()
-
===========changed ref 6===========
# module: tests.test_main
def test_base69():
res = decrypt(
Config().library_default().complete_config(),
+ "kAZAtABBeB8A-AoB8ADBNAhBLA1AFBgA0AXBfBGATAVAFBgAwAWBHB<ACAkA-AnB0AVBnBNBDARAZBiBQAYAtAhBhABA<ArB4AbAMANBDAFAXBfBQAdAOAmArAUAAA2=",
- "0110100001100101011011000110110001101111",
)
+ assert res == answer_str
- assert res != "0110100001100101011011000110110001101111"
===========changed ref 7===========
# module: tests.test_main
+ def test_base85():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "87cURD]inB+DtV)AKY].+C\\nn+CT.u+A!\\lBkq9&A8c*'@;]Tu@;p1%AKYE!A0>u7ARt",
+ )
+ assert res.lower() == answer_str.lower()
+
===========changed ref 8===========
# module: tests.test_main
- def test_hex():
- res = decrypt(
- Config().library_default().complete_config(),
- "48656c6c6f206d79206e616d652069732062656520616e642049206c696b6520646f6720616e64206170706c6520616e6420"
- "74726565",
- )
-
- assert res == answer_str
-
===========changed ref 9===========
# module: tests.test_main
+ def test_base58_bitcoin():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "6qYhNwsP46Mn4gy6gyANfsMm2icAxGFA6gnFjVm9phYHeby7PZm3vthiXxSU77teQgTFGbHETn",
+ )
+ assert res.lower() == answer_str.lower()
+
===========changed ref 10===========
# module: tests.test_main
def test_plaintext():
+ res = decrypt(Config().library_default().complete_config(), answer_str)
- res = decrypt(Config.library_default().complete_config(), answer_str)
-
- print(res)
-
assert res == answer_str
===========changed ref 11===========
# module: tests.test_main
+ def test_base32():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "JBSWY3DPEBWXSIDOMFWWKIDJOMQGEZLFEBQW4ZBAJEQGY2LLMUQGI33HEBQW4ZBAMFYHA3DFEBQW4ZBAORZGKZI=",
+ )
+ assert res.lower() == answer_str.lower()
+
===========changed ref 12===========
# module: tests.test_main
- def test_XandY():
- res = decrypt(
- Config().library_default().complete_config(),
- "xDDxDxxx xDDxxDxD xDDxDDxx xDDxDDxx xDDxDDDD xxDxxxxx xDDxDDxD xDDDDxxD xxDxxxxx xDDxDDDx xDDxxxxD xDDxDDxD xDDxxDxD xxDxxxxx xDDxDxxD xDDDxxDD xxDxxxxx xDDxxxDx xDDxxDxD xDDxxDxD xxDxxxxx xDDxxxxD xDDxDDDx xDDxxDxx xxDxxxxx xDxxDxxD xxDxxxxx xDDxDDxx xDDxDxxD xDDxDxDD xDDxxDxD xxDxxxxx xDDxxDxx xDDxDDDD xDDxxDDD xxDxxxxx xDDxxxxD xDDxDDDx xDDxxDxx xxDxxxxx xDDxxxxD xDDDxxxx xDDDxxxx xDDxDDxx xDDxxDxD xxDxxxxx xDDxxxxD xDDxDDDx xDDxxDxx xxDxxxxx xDDDxDxx xDDDxxDx xDDxxDxD xDDxxDxD",
- )
- assert res.lower() == answer_str.lower()
-
===========changed ref 13===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+
+
===========changed ref 14===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 15===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 16===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
|
|
tests.test_main/test_soundex
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<4>:<add> assert res.lower() == "history is in the past"
<del> assert "history is in the past" == res
|
# module: tests.test_main
def test_soundex():
<0> res = decrypt(
<1> Config().library_default().complete_config(),
<2> "H236 I200 I500 T000 P230",
<3> )
<4> assert "history is in the past" == res
<5>
|
===========unchanged ref 0===========
at: ciphey.iface._config
Config()
at: ciphey.iface._config.Config
complete_config() -> "Config"
library_default()
at: tests.test_main.test_tap_code
res = decrypt(
Config().library_default().complete_config(),
"4,4 1,5 4,3 4,4 3,4 3,3 1,5 4,4 5,2 3,4 4,4 2,3 4,2 1,5 1,5",
)
===========changed ref 0===========
# module: tests.test_main
+ def test_rot47():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "$A9:?I @7 3=24< BF2CEK[ ;F586 >J G@H",
+ )
+ assert res == "Sphinx of black quartz, judge my vow"
+
===========changed ref 1===========
# module: tests.test_main
+ def test_reversed_text():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "eert dna elppa dna god ekil I dna eeb si eman ym olleH",
+ )
+ assert res.lower() == answer_str.lower()
+
===========changed ref 2===========
# module: tests.test_main
+ def test_json_problem():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "0110100001100101011011000110110001101111",
+ )
+ assert res != "0110100001100101011011000110110001101111"
+
===========changed ref 3===========
# module: tests.test_main
+ def test_leetspeak():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "|-|3ll0 my n4m3 1s 833 4nd 1 l1k3 D06 4ND 4ppl3 4nd 7R33",
+ )
+ assert res.lower() == answer_str.lower()
+
===========changed ref 4===========
# module: tests.test_main
+ def test_octal():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "110 145 154 154 157 40 155 171 40 156 141 155 145 40 151 163 40 142 145 145 40 141 156 144 40 111 40 154 151 153 145 40 144 157 147 40 141 156 144 40 141 160 160 154 145 40 141 156 144 40 164 162 145 145",
+ )
+ assert res.lower() == answer_str.lower()
+
===========changed ref 5===========
# module: tests.test_main
+ def test_morse_code():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ ".... . .-.. .-.. ---/-- -.--/-. .- -- ./.. .../-... . ./.- -. -../../.-.. .. -.- ./-.. --- --./.- -. -../.- .--. .--. .-.. ./.- -. -../- .-. . .",
+ )
+ assert res == answer_str.upper()
+
===========changed ref 6===========
# module: tests.test_main
+ def test_hexadecimal():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "48 65 6c 6c 6f 20 6d 79 20 6e 61 6d 65 20 69 73 20 62 65 65 20 61 6e 64 20 49 20 6c 69 6b 65 20 64 6f 67 20 61 6e 64 20 61 70 70 6c 65 20 61 6e 64 20 74 72 65 65",
+ )
+
+ assert res.lower() == answer_str.lower()
+
===========changed ref 7===========
# module: tests.test_main
- def leet():
- res = decrypt(
- Config().library_default().complete_config(),
- "|-|3770 my nam3 is 833 and 1 lIke D06 AND 4|>|>13 4 7R33",
- )
- assert res.lower() == answer_str
-
===========changed ref 8===========
# module: tests.test_main
- def test_base58_normal():
- res = decrypt(
- Config().library_default().complete_config(),
- "6qYhNwsP46Mn4gy6gyANfsMm2icAxGFA6gnFjVm9phYHeby7PZm3vthiXxSU77teQgTFGbHETn",
- )
- assert res.lower() == answer_str.lower()
-
===========changed ref 9===========
# module: tests.test_main
def test_base69():
res = decrypt(
Config().library_default().complete_config(),
+ "kAZAtABBeB8A-AoB8ADBNAhBLA1AFBgA0AXBfBGATAVAFBgAwAWBHB<ACAkA-AnB0AVBnBNBDARAZBiBQAYAtAhBhABA<ArB4AbAMANBDAFAXBfBQAdAOAmArAUAAA2=",
- "0110100001100101011011000110110001101111",
)
+ assert res == answer_str
- assert res != "0110100001100101011011000110110001101111"
===========changed ref 10===========
# module: tests.test_main
+ def test_base85():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "87cURD]inB+DtV)AKY].+C\\nn+CT.u+A!\\lBkq9&A8c*'@;]Tu@;p1%AKYE!A0>u7ARt",
+ )
+ assert res.lower() == answer_str.lower()
+
===========changed ref 11===========
# module: tests.test_main
- def test_hex():
- res = decrypt(
- Config().library_default().complete_config(),
- "48656c6c6f206d79206e616d652069732062656520616e642049206c696b6520646f6720616e64206170706c6520616e6420"
- "74726565",
- )
-
- assert res == answer_str
-
===========changed ref 12===========
# module: tests.test_main
+ def test_base58_bitcoin():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "6qYhNwsP46Mn4gy6gyANfsMm2icAxGFA6gnFjVm9phYHeby7PZm3vthiXxSU77teQgTFGbHETn",
+ )
+ assert res.lower() == answer_str.lower()
+
===========changed ref 13===========
# module: tests.test_main
def test_plaintext():
+ res = decrypt(Config().library_default().complete_config(), answer_str)
- res = decrypt(Config.library_default().complete_config(), answer_str)
-
- print(res)
-
assert res == answer_str
===========changed ref 14===========
# module: tests.test_main
+ def test_base32():
+ res = decrypt(
+ Config().library_default().complete_config(),
+ "JBSWY3DPEBWXSIDOMFWWKIDJOMQGEZLFEBQW4ZBAJEQGY2LLMUQGI33HEBQW4ZBAMFYHA3DFEBQW4ZBAORZGKZI=",
+ )
+ assert res.lower() == answer_str.lower()
+
|
ciphey.basemods.Checkers.quorum/Quorum.__init__
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<13>:<del> self.checkers.append(
<14>:<del> ciphey.iface._registry.get_named(i, ciphey.iface.Checker[T])
<15>:<del> )
<16>:<add> self.checkers.append(_registry.get_named(i, Checker[T]))
|
# module: ciphey.basemods.Checkers.quorum
+ class Quorum(Generic[T], Checker[T]):
- class Quorum(Generic[T], ciphey.iface.Checker[T]):
def __init__(self, config: Config):
<0> super().__init__(config)
<1>
<2> if self._params().k is None:
<3> k = len(self._params()["checker"])
<4> # These checks need to be separate, to make sure that we do not have zero members
<5> if self._params().k == 0 or self._params().k > len(self._params()["checker"]):
<6> raise IndexError(
<7> "k must be between 0 and the number of checkers (inclusive)"
<8> )
<9>
<10> self.checkers = []
<11> for i in self._params()["checker"]:
<12> # This enforces type consistency
<13> self.checkers.append(
<14> ciphey.iface._registry.get_named(i, ciphey.iface.Checker[T])
<15> )
<16>
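To make the constructor's k-of-n constraint concrete, here is a minimal standalone sketch of quorum voting over simple boolean checkers. The plain-function interface and checker names are illustrative assumptions, not Ciphey's Checker API.

# Minimal k-of-n quorum sketch under the same constraint the
# constructor enforces (0 < k <= len(checkers)).
from typing import Callable, List, Optional

def quorum_check(checkers: List[Callable[[str], bool]], text: str,
                 k: Optional[int] = None) -> bool:
    if k is None:
        k = len(checkers)  # default: require every checker to agree
    if k == 0 or k > len(checkers):
        raise IndexError("k must be between 0 and the number of checkers (inclusive)")
    hits = 0
    for checker in checkers:
        hits += checker(text)
        if hits >= k:  # short-circuit once k checkers have accepted
            return True
    return False

# e.g. accept if at least 2 of 3 heuristics fire
print(quorum_check([str.isascii, str.islower, str.isprintable], "hello", k=2))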
|
===========unchanged ref 0===========
at: ciphey.iface._modules
T = TypeVar("T")
ParamSpec(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
Checker(config: Config)
at: ciphey.iface._modules.ConfigurableModule
getParams() -> Optional[Dict[str, ParamSpec]]
_params()
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 5===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 6===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 7===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 8===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 9===========
# module: ciphey.basemods.Decoders.url
@registry.register
+ class Url(Decoder[str]):
- class Url(Decoder[str, str]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 10===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 11===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 12===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 13===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 14===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 15===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 16===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 17===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 18===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 19===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 20===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 21===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "base58_bitcoin"
+
===========changed ref 23===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 24===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 25===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 26===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ # Not expected to show up often, but also very fast to check.
+ return 0.05
+
|
ciphey.basemods.Decoders.binary/Binary.decode
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<1>:<add> ctext = re.sub(r"[^\S \n]", " ", ctext, flags=re.UNICODE)
<del> text = re.sub(r"[^\S \n]", " ", text, flags=re.UNICODE)
<2>:<add> ctext = ctext.replace("\n", " ")
<del> text = text.replace("\n", " ")
<4>:<add> existing_split = self.try_split(ctext.split(" "))
<del> existing_split = self.try_split(text.split(" "))
<11>:<add> ctext = ctext.replace(" ", "")
<del> text = text.replace(" ", "")
<13>:<add> return self.try_split([ctext[i : i + 8] for i in range(0, len(ctext), 8)])
<del> return self.try_split([text[i : i + 8] for i in range(0, len(text), 8)])
|
# module: ciphey.basemods.Decoders.binary
@registry.register
+ class Binary(Decoder[str]):
- class Binary(ciphey.iface.Decoder[str, bytes]):
+ def decode(self, ctext: T) -> Optional[U]:
- def decode(self, text: str) -> Optional[bytes]:
<0> try:
<1> text = re.sub(r"[^\S \n]", " ", text, flags=re.UNICODE)
<2> text = text.replace("\n", " ")
<3>
<4> existing_split = self.try_split(text.split(" "))
<5> if existing_split is not None:
<6> return existing_split
<7>
<8> # Now we try our own grouping
<9>
<10> # Remove final bit of whitespace
<11> text = text.replace(" ", "")
<12> # Split into bytes, and test
<13> return self.try_split([text[i : i + 8] for i in range(0, len(text), 8)])
<14> # Catch bad binary chars
<15> except ValueError:
<16> return None
<17>
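The grouping fallback above is easier to see in isolation: strip all whitespace, slice the bit stream into 8-bit chunks, and parse each chunk with int(chunk, 2). A minimal sketch, omitting the pre-split try_split path:

# Standalone illustration of the 8-bit grouping strategy.
def decode_binary(ctext: str) -> bytes:
    bits = "".join(ctext.split())  # drop all whitespace
    if len(bits) % 8 != 0:
        raise ValueError("not a whole number of bytes")
    return bytes(int(bits[i : i + 8], 2) for i in range(0, len(bits), 8))

print(decode_binary("01101000 01101001"))  # b'hi'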
|
===========unchanged ref 0===========
at: ciphey.basemods.Decoders.binary.Binary
try_split(split_text: List[str])
at: ciphey.iface._modules.Decoder
decode(self, ctext: T) -> Optional[U]
at: re
UNICODE = RegexFlag.UNICODE
sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 5===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 6===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 7===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 8===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 9===========
# module: ciphey.basemods.Decoders.url
@registry.register
+ class Url(Decoder[str]):
- class Url(Decoder[str, str]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 10===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 11===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 12===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 13===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 14===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 15===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 16===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 17===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 18===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 19===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 20===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 21===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "base58_bitcoin"
+
===========changed ref 23===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 24===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 25===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
|
ciphey.basemods.Decoders.binary/Binary.getParams
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<0>:<add> return None
<del> pass
|
# module: ciphey.basemods.Decoders.binary
@registry.register
+ class Binary(Decoder[str]):
- class Binary(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
<0> pass
<1>
|
===========unchanged ref 0===========
at: ciphey.iface._modules.ConfigurableModule
getParams() -> Optional[Dict[str, ParamSpec]]
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
# module: ciphey.basemods.Decoders.binary
@registry.register
+ class Binary(Decoder[str]):
- class Binary(ciphey.iface.Decoder[str, bytes]):
+ def decode(self, ctext: T) -> Optional[U]:
- def decode(self, text: str) -> Optional[bytes]:
try:
+ ctext = re.sub(r"[^\S \n]", " ", ctext, flags=re.UNICODE)
- text = re.sub(r"[^\S \n]", " ", text, flags=re.UNICODE)
+ ctext = ctext.replace("\n", " ")
- text = text.replace("\n", " ")
+ existing_split = self.try_split(ctext.split(" "))
- existing_split = self.try_split(text.split(" "))
if existing_split is not None:
return existing_split
# Now we try our own grouping
# Remove final bit of whitespace
+ ctext = ctext.replace(" ", "")
- text = text.replace(" ", "")
# Split into bytes, and test
+ return self.try_split([ctext[i : i + 8] for i in range(0, len(ctext), 8)])
- return self.try_split([text[i : i + 8] for i in range(0, len(text), 8)])
# Catch bad binary chars
except ValueError:
return None
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 4===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 5===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 6===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 7===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 8===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 9===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 10===========
# module: ciphey.basemods.Decoders.url
@registry.register
+ class Url(Decoder[str]):
- class Url(Decoder[str, str]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 11===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 12===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 13===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 14===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 15===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 16===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 17===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 18===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 19===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 20===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 21===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 23===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "base58_bitcoin"
+
|
ciphey.basemods.Decoders.uuencode/Uuencode.decode
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<19>:<add> for _, value in enumerate(ctext_split):
<del> for i in range(0, len(ctext_split)):
<20>:<add> result += a2b_uu(value).decode("utf-8")
<del> result += a2b_uu(ctext_split[i]).decode("utf-8")
|
# module: ciphey.basemods.Decoders.uuencode
@registry.register
+ class Uuencode(Decoder[str]):
- class Uuencode(Decoder[str, str]):
def decode(self, ctext: T) -> Optional[U]:
<0> """
<1> UUencode (Unix-to-Unix encoding) is a binary-to-text encoding
<2> that converts binary data (split into 6-bit blocks) into printable ASCII characters.
<3>
<4> This function decodes the input string 'ctext' if it has been encoded with a uuencoder,
<5> and returns None otherwise.
<6> """
<7> logger.trace("Attempting UUencode")
<8> result = ""
<9> try:
<10> # UUencoded messages may begin with prefix "begin" and end with suffix "end"
<11> # In that case, we use the codecs module in Python
<12> ctext_strip = ctext.strip()
<13> if ctext_strip.startswith("begin") and ctext_strip.endswith("end"):
<14> result = decode(bytes(ctext, "utf-8"), "uu").decode()
<15> else:
<16> # If there isn't a "begin" prefix and "end" suffix, we use the binascii module instead
<17> # It is possible that the ctext has multiple lines, so convert each line and append
<18> ctext_split = list(filter(None, ctext.splitlines()))
<19> for i in range(0, len(ctext_split)):
<20> result += a2b_uu(ctext_split[i]).decode("utf-8")
<21> logger.debug(f"UUencode successful, returning '{result}'")
<22> return result
<23> except Exception:
<24> logger.trace("Failed to decode UUencode")
<25> return None
<26>
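For the bare-line branch, binascii round-trips a single uuencoded line directly. A small sketch of that path (the codecs-based "begin ... end" branch is not shown):

# Round-trip one uuencoded line with binascii.
from binascii import a2b_uu, b2a_uu

line = b2a_uu(b"hello my name is bee")  # encode one line (max 45 input bytes)
assert a2b_uu(line) == b"hello my name is bee"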
|
===========changed ref 0===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 4===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 5===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 6===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 7===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 8===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 9===========
# module: ciphey.basemods.Decoders.binary
@registry.register
+ class Binary(Decoder[str]):
- class Binary(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 10===========
# module: ciphey.basemods.Decoders.url
@registry.register
+ class Url(Decoder[str]):
- class Url(Decoder[str, str]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 11===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 12===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 13===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 14===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 15===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 16===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 17===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 18===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 19===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 20===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 21===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def priority() -> float:
- return 0.015
-
===========changed ref 22===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "morse_code"
+
===========changed ref 23===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "base58_bitcoin"
+
===========changed ref 24===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 25===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ def __init__(self, config: Config):
+ super().__init__(config)
+
===========changed ref 26===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- def __init__(self, config: ciphey.iface.Config):
- super().__init__(config)
-
===========changed ref 27===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ # Not expected to show up often, but also very fast to check.
+ return 0.05
+
|
ciphey.basemods.Searchers.perfection/Perfection.getParams
|
Modified
|
Ciphey~Ciphey
|
5544e945c591d063a2541fd40991c1f81b729575
|
Code cleanup (#510)
|
<0>:<add> return None
<del> pass
|
# module: ciphey.basemods.Searchers.perfection
@registry.register
class Perfection(AuSearch):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
<0> pass
<1>
|
===========changed ref 0===========
+ # module: ciphey.basemods.Crackers.xandy
+
+
===========changed ref 1===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+
+
===========changed ref 2===========
+ # module: ciphey.basemods.Decoders.leetspeak
+
+
===========changed ref 3===========
+ # module: ciphey.basemods.Decoders.morse_code
+
+
===========changed ref 4===========
# module: ciphey.basemods.Decoders.multi_tap
- @registry.register
- class multiTap(Decoder[str, str]):
- @staticmethod
- def getParams() -> Optional[Dict[str, ParamSpec]]:
- pass
-
===========changed ref 5===========
# module: ciphey.basemods.Decoders.multi_tap
- @registry.register
- class multiTap(Decoder[str, str]):
- @staticmethod
- def getParams() -> Optional[Dict[str, ParamSpec]]:
- pass
-
===========changed ref 6===========
+ # module: ciphey.basemods.Decoders.base58_bitcoin
+ @registry.register
+ class Base58_bitcoin(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 7===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
- pass
-
===========changed ref 8===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
+
===========changed ref 9===========
# module: ciphey.basemods.Decoders.tap_code
+ @registry.register
+ class Tap_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.06
+
===========changed ref 10===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 11===========
+ # module: ciphey.basemods.Decoders.morse_code
+ @registry.register
+ class Morse_code(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.05
+
===========changed ref 12===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
+ @staticmethod
+ def score_utility() -> float:
+ return 1.5
+
===========changed ref 13===========
# module: ciphey.basemods.Decoders.multi_tap
- @registry.register
- class multiTap(Decoder[str, str]):
- @staticmethod
- def priority() -> float:
- return 0.05
-
===========changed ref 14===========
# module: ciphey.basemods.Decoders.multi_tap
- @registry.register
- class multiTap(Decoder[str, str]):
- @staticmethod
- def priority() -> float:
- return 0.05
-
===========changed ref 15===========
# module: ciphey.basemods.Decoders.tap_code
- @registry.register
- class tap_code(Decoder[str, str]):
- @staticmethod
- def priority() -> float:
- return 0.06
-
===========changed ref 16===========
# module: ciphey.basemods.Decoders.binary
@registry.register
+ class Binary(Decoder[str]):
- class Binary(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 17===========
# module: ciphey.basemods.Decoders.url
@registry.register
+ class Url(Decoder[str]):
- class Url(Decoder[str, str]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 18===========
# module: ciphey.basemods.Decoders.reverse
+ @registry.register
- @registry.register_multi((str, str), (bytes, bytes))
+ class Reverse(Decoder[str]):
- class Reverse(Decoder):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 19===========
# module: ciphey.basemods.Decoders.hexadecimal
- @registry.register
- class Hex(ciphey.iface.Decoder[str, bytes]):
- @staticmethod
- def getTarget() -> str:
- return "hex"
-
===========changed ref 20===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "hexadecimal"
+
===========changed ref 21===========
# module: ciphey.basemods.Decoders.hexadecimal
+ @registry.register
+ class Hexadecimal(Decoder[str]):
+ @staticmethod
+ def priority() -> float:
+ return 0.015
+
===========changed ref 22===========
# module: ciphey.basemods.Resources.cipheydists
@registry.register_multi(WordList, Distribution, Translation)
+ class CipheyDists(ResourceLoader):
- class CipheyDists(ciphey.iface.ResourceLoader):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
+ return None
- pass
===========changed ref 23===========
# module: ciphey.basemods.Decoders.octal
@registry.register
+ class Octal(Decoder[str]):
- class Octal(ciphey.iface.Decoder[str, bytes]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 24===========
+ # module: ciphey.basemods.Decoders.leetspeak
+ @registry.register
+ class Leetspeak(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "leetspeak"
+
===========changed ref 25===========
# module: ciphey.basemods.Crackers.hash
@registry.register
+ class HashBuster(Cracker[str]):
- class HashBuster(ciphey.iface.Cracker[str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 26===========
# module: ciphey.basemods.Decoders.unicode
@registry.register
+ class Utf8(Decoder[bytes]):
- class Utf8(ciphey.iface.Decoder[bytes, str]):
@staticmethod
+ def getParams() -> Optional[Dict[str, ParamSpec]]:
- def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
+ return None
- pass
===========changed ref 27===========
# module: ciphey.basemods.Crackers.xor_single
@registry.register
+ class XorSingle(Cracker[bytes]):
- class XorSingle(ciphey.iface.Cracker[bytes]):
- @staticmethod
- def scoreUtility() -> float:
- return 1.5
-
===========changed ref 28===========
# module: ciphey.basemods.Decoders.tap_code
+ @registry.register
+ class Tap_code(Decoder[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "tap_code"
+
===========changed ref 29===========
+ # module: ciphey.basemods.Crackers.xandy
+ @registry.register
+ class Xandy(Cracker[str]):
+ @staticmethod
+ def getTarget() -> str:
+ return "xandy"
+
|
ciphey.ciphey/main
|
Modified
|
Ciphey~Ciphey
|
b29683c784a60ace7feafc58bfa3b2406600dbd9
|
fix can't exit ciphey (#526)
|
<s> module from the given path",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
<0> """Ciphey - Automated Decryption Tool
<1>
<2> Documentation:
<3> https://github.com/Ciphey/Ciphey/wiki\n
<4> Discord (support here, we're online most of the day):
<5> https://discord.ciphey.online/\n
<6> GitHub:
<7> https://github.com/ciphey/ciphey\n
<8>
<9> Ciphey is an automated decryption tool using smart artificial intelligence and natural language processing. Input encrypted text, get the decrypted text back.
<10>
<11> Examples:\n
<12> Basic Usage: ciphey -t "aGVsbG8gbXkgbmFtZSBpcyBiZWU="
<13>
<14> """
<15>
<16> """Function to deal with arguments. Either calls with args or not. Makes Pytest work.
<17>
<18> It gets the arguments in the function definition using locals()
<19> if withArgs is True, that means this is being called with command line args
<20> so go to arg_parsing() to get those args
<21> we then update locals() with the new command line args and remove "withArgs"
<22> This function then calls call_encryption(**result) which passes our dict of args
<23> to the function as its own arguments using dict unpacking.
<24> Returns:
<25> The output of the decryption.
<26> """
<27>
<28> # if user wants to know where appdirs is
<29> # print and exit
<30> if "appdirs" in kwargs and kwargs["appdirs"]:
<31> dirs = AppDirs("Ciphey", "Ciphey")</s>
|
===========below chunk 0===========
<s>",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
# offset: 1
print(
f"The settings.yml file should be at {os.path.join(path_to_config, 'settings.yml')}"
)
return None
# Now we create the config object
config = iface.Config()
# Load the settings file into the config
load_msg: str
cfg_arg = kwargs["config"]
if cfg_arg is None:
# Make sure that the config dir actually exists
os.makedirs(iface.Config.get_default_dir(), exist_ok=True)
config.load_file(create=True)
load_msg = f"Opened config file at {os.path.join(iface.Config.get_default_dir(), 'config.yml')}"
else:
config.load_file(cfg_arg)
load_msg = f"Opened config file at {cfg_arg}"
# Load the verbosity, so that we can start logging
verbosity = kwargs["verbose"]
quiet = kwargs["quiet"]
if verbosity is None:
if quiet is not None:
verbosity = -quiet
elif quiet is not None:
verbosity -= quiet
if kwargs["greppable"] is not None:
verbosity -= 999
# Use the existing value as a base
config.verbosity += verbosity
config.update_log_level(config.verbosity)
logger.debug(load_msg)
logger.trace(f"Got cmdline args {kwargs}")
# Now we load the modules
module_arg = kwargs["module"]
if module_arg is not None:
config</s>
===========below chunk 1===========
<s>",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
# offset: 2
<s> # Now we load the modules
module_arg = kwargs["module"]
if module_arg is not None:
config.modules += list(module_arg)
# We need to load formats BEFORE we instantiate objects
if kwargs["bytes"] is not None:
config.update_format("bytes")
# Next, load the objects
params = kwargs["param"]
if params is not None:
for i in params:
key, value = i.split("=", 1)
parent, name = key.split(".", 1)
config.update_param(parent, name, value)
config.update("checker", kwargs["checker"])
config.update("searcher", kwargs["searcher"])
config.update("default_dist", kwargs["default_dist"])
config.complete_config()
logger.trace(f"Command line opts: {kwargs}")
logger.trace(f"Config finalised: {config}")
# Finally, we load the plaintext
if kwargs["text"] is None:
if kwargs["file"] is not None:
kwargs["text"] = kwargs["file"].read()
elif kwargs["text_stdin"] is not None:
kwargs["text"] = kwargs["text_stdin"]
else:
# else print help menu
print("[bold red]Error. No inputs were given to Ciphey. [bold red]")
@click.pass_context
</s>
===========below chunk 2===========
<s>",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
# offset: 3
<s> all_procedure(ctx):
print_help(ctx)
all_procedure()
return None
if issubclass(config.objs["format"], type(kwargs["text"])):
pass
elif config.objs["format"] == str and isinstance(kwargs["text"], bytes):
kwargs["text"] = kwargs["text"].decode("utf-8")
elif config.objs["format"] == bytes and isinstance(kwargs["text"], str):
kwargs["text"] = kwargs["text"].encode("utf-8")
else:
raise TypeError(f"Cannot load type {config.format} from {type(kwargs['text'])}")
result: Optional[str]
# if debug or quiet mode is on, run without spinner
if config.verbosity != 0:
result = decrypt(config, kwargs["text"])
else:
# else, run with spinner if verbosity is 0
with yaspin(Spinners.earth, "Thinking") as sp:
config.set_spinner(sp)
result = decrypt(config, kwargs["text"])
if result is None:
result = "Could not find any solutions."
print(result)
===========unchanged ref 0===========
at: ciphey.ciphey
get_name(ctx, param, value)
at: click.decorators
command(name: Optional[str]=..., cls: Optional[Type[Command]]=..., context_settings: Optional[Dict[Any, Any]]=..., help: Optional[str]=..., epilog: Optional[str]=..., short_help: Optional[str]=..., options_metavar: str=..., add_help_option: bool=..., hidden: bool=..., deprecated: bool=...) -> Callable[[Callable[..., Any]], Command]
argument(*param_decls: Text, cls: Type[Argument]=..., required: Optional[bool]=..., type: Optional[_ConvertibleType]=..., default: Optional[Any]=..., callback: Optional[_Callback]=..., nargs: Optional[int]=..., metavar: Optional[str]=..., expose_value: bool=..., is_eager: bool=..., envvar: Optional[Union[str, List[str]]]=..., autocompletion: Optional[Callable[[Any, List[str], str], List[Union[str, Tuple[str, str]]]]]=...) -> _IdentityFunction
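The decorator stack above follows the standard click pattern: options and the trailing argument are collected into **kwargs, with a callback on the positional argument to fall back to stdin. A stripped-down sketch with illustrative names, not Ciphey's real CLI surface:

import click

def from_stdin(ctx, param, value):
    # Ciphey's real get_name callback presumably also reads piped stdin here
    return value

@click.command()
@click.option("-t", "--text", help="Ciphertext to decrypt", type=str)
@click.option("-q", "--quiet", help="Decrease verbosity", count=True)
@click.argument("text_stdin", callback=from_stdin, required=False)
def main(**kwargs):
    text = kwargs["text"] or kwargs["text_stdin"]
    click.echo(f"decrypting {text!r} (quiet={kwargs['quiet']})")

if __name__ == "__main__":
    main()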
|
|
ciphey.ciphey/main
|
Modified
|
Ciphey~Ciphey
|
345d15021b7e2ce3cb00cacea132e40c93294afa
|
Fixed some stuff with ausearch and bases (#528)
|
<s> module from the given path",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
<0> """Ciphey - Automated Decryption Tool
<1>
<2> Documentation:
<3> https://github.com/Ciphey/Ciphey/wiki\n
<4> Discord (support here, we're online most of the day):
<5> https://discord.ciphey.online/\n
<6> GitHub:
<7> https://github.com/ciphey/ciphey\n
<8>
<9> Ciphey is an automated decryption tool using smart artificial intelligence and natural language processing. Input encrypted text, get the decrypted text back.
<10>
<11> Examples:\n
<12> Basic Usage: ciphey -t "aGVsbG8gbXkgbmFtZSBpcyBiZWU="
<13>
<14> """
<15>
<16> """Function to deal with arguments. Either calls with args or not. Makes Pytest work.
<17>
<18> It gets the arguments in the function definition using locals()
<19> if withArgs is True, that means this is being called with command line args
<20> so go to arg_parsing() to get those args
<21> we then update locals() with the new command line args and remove "withArgs"
<22> This function then calls call_encryption(**result) which passes our dict of args
<23> to the function as its own arguments using dict unpacking.
<24> Returns:
<25> The output of the decryption.
<26> """
<27>
<28> # if user wants to know where appdirs is
<29> # print and exit
<30> if "appdirs" in kwargs and kwargs["appdirs"]:
<31> dirs = AppDirs("Ciphey", "Ciphey")</s>
|
===========below chunk 0===========
<s>",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
# offset: 1
print(
f"The settings.yml file should be at {os.path.join(path_to_config, 'settings.yml')}"
)
return None
# Now we create the config object
config = iface.Config()
# Load the settings file into the config
load_msg: str
cfg_arg = kwargs["config"]
if cfg_arg is None:
# Make sure that the config dir actually exists
os.makedirs(iface.Config.get_default_dir(), exist_ok=True)
config.load_file(create=True)
load_msg = f"Opened config file at {os.path.join(iface.Config.get_default_dir(), 'config.yml')}"
else:
config.load_file(cfg_arg)
load_msg = f"Opened config file at {cfg_arg}"
# Load the verbosity, so that we can start logging
verbosity = kwargs["verbose"]
quiet = kwargs["quiet"]
if verbosity is None:
if quiet is not None:
verbosity = -quiet
elif quiet is not None:
verbosity -= quiet
if kwargs["greppable"] is not None:
verbosity -= 999
# Use the existing value as a base
config.verbosity += verbosity
config.update_log_level(config.verbosity)
logger.debug(load_msg)
logger.trace(f"Got cmdline args {kwargs}")
# Now we load the modules
module_arg = kwargs["module"]
if module_arg is not None:
config</s>
===========below chunk 1===========
<s>",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
# offset: 2
<s> # Now we load the modules
module_arg = kwargs["module"]
if module_arg is not None:
config.modules += list(module_arg)
# We need to load formats BEFORE we instantiate objects
if kwargs["bytes"] is not None:
config.update_format("bytes")
# Next, load the objects
params = kwargs["param"]
if params is not None:
for i in params:
key, value = i.split("=", 1)
parent, name = key.split(".", 1)
config.update_param(parent, name, value)
config.update("checker", kwargs["checker"])
config.update("searcher", kwargs["searcher"])
config.update("default_dist", kwargs["default_dist"])
config.complete_config()
logger.trace(f"Command line opts: {kwargs}")
logger.trace(f"Config finalised: {config}")
# Finally, we load the plaintext
if kwargs["text"] is None:
if kwargs["file"] is not None:
kwargs["text"] = kwargs["file"].read()
elif kwargs["text_stdin"] is not None:
kwargs["text"] = kwargs["text_stdin"]
else:
# else print help menu
print("[bold red]Error. No inputs were given to Ciphey. [bold red]")
@click.pass_context
</s>
===========below chunk 2===========
<s>",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
# offset: 3
<s> all_procedure(ctx):
print_help(ctx)
all_procedure()
return None
if issubclass(config.objs["format"], type(kwargs["text"])):
pass
elif config.objs["format"] == str and isinstance(kwargs["text"], bytes):
kwargs["text"] = kwargs["text"].decode("utf-8")
elif config.objs["format"] == bytes and isinstance(kwargs["text"], str):
kwargs["text"] = kwargs["text"].encode("utf-8")
else:
raise TypeError(f"Cannot load type {config.format} from {type(kwargs['text'])}")
result: Optional[str]
# if debug or quiet mode is on, run without spinner
try:
if config.verbosity != 0:
result = decrypt(config, kwargs["text"])
else:
# else, run with spinner if verbosity is 0
with yaspin(Spinners.earth, "Thinking") as sp:
config.set_spinner(sp)
result = decrypt(config, kwargs["text"])
if result is None:
result = "Could not find any solutions."
print(result)
except KeyboardInterrupt:
sys.exit()
===========unchanged ref 0===========
at: ciphey.ciphey
get_name(ctx, param, value)
at: click.decorators
command(name: Optional[str]=..., cls: Optional[Type[Command]]=..., context_settings: Optional[Dict[Any, Any]]=..., help: Optional[str]=..., epilog: Optional[str]=..., short_help: Optional[str]=..., options_metavar: str=..., add_help_option: bool=..., hidden: bool=..., deprecated: bool=...) -> Callable[[Callable[..., Any]], Command]
argument(*param_decls: Text, cls: Type[Argument]=..., required: Optional[bool]=..., type: Optional[_ConvertibleType]=..., default: Optional[Any]=..., callback: Optional[_Callback]=..., nargs: Optional[int]=..., metavar: Optional[str]=..., expose_value: bool=..., is_eager: bool=..., envvar: Optional[Union[str, List[str]]]=..., autocompletion: Optional[Callable[[Any, List[str], str], List[Union[str, Tuple[str, str]]]]]=...) -> _IdentityFunction
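The #526 fix visible in the chunks above wraps the decrypt call so Ctrl-C exits cleanly instead of unwinding through the spinner. Reduced to its core, the pattern is:

# Graceful Ctrl-C handling around a long-running call (sketch).
import sys

def run(work):
    try:
        result = work()
    except KeyboardInterrupt:
        sys.exit()
    print(result if result is not None else "Could not find any solutions.")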
|
|
ciphey.basemods.Decoders.bases/_dispatch
|
Modified
|
Ciphey~Ciphey
|
345d15021b7e2ce3cb00cacea132e40c93294afa
|
Fixed some stuff with ausearch and bases (#528)
|
<3>:<add> # remove all whitespace
<add> ctext = re.sub(r"\s+", "", ctext, re.UNICODE)
|
# module: ciphey.basemods.Decoders.bases
def _dispatch(self: Any, ctext: str, func: Callable[[str], bytes]) -> Optional[bytes]:
<0> logger.trace(f"Attempting {self.getTarget()}")
<1>
<2> try:
<3> result = func(ctext)
<4> logger.debug(f"{self.getTarget()} successful, returning {result}")
<5> return result
<6> except ValueError:
<7> logger.trace(f"Failed to decode {self.getTarget()}")
<8> return None
<9>
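The #528 change adds whitespace stripping before dispatch; note that re.sub's fourth positional parameter is count, so the flag is more safely passed as flags=re.UNICODE. A standalone sketch of the dispatch idea, using base64 as an example decoder:

# Strip whitespace, try a base decoder, treat ValueError as "not this encoding".
import base64
import re
from typing import Callable, Optional

def try_decode(ctext: str, func: Callable[[str], bytes]) -> Optional[bytes]:
    ctext = re.sub(r"\s+", "", ctext, flags=re.UNICODE)
    try:
        return func(ctext)
    except ValueError:
        return None

print(try_decode("aGVs bG8=", lambda s: base64.b64decode(s, validate=True)))  # b'hello'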
|
===========unchanged ref 0===========
at: re
UNICODE = RegexFlag.UNICODE
sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
===========changed ref 0===========
<s> module from the given path",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
"""Ciphey - Automated Decryption Tool
Documentation:
https://github.com/Ciphey/Ciphey/wiki\n
Discord (support here, we're online most of the day):
https://discord.ciphey.online/\n
GitHub:
https://github.com/ciphey/ciphey\n
Ciphey is an automated decryption tool using smart artificial intelligence and natural language processing. Input encrypted text, get the decrypted text back.
Examples:\n
Basic Usage: ciphey -t "aGVsbG8gbXkgbmFtZSBpcyBiZWU="
"""
"""Function to deal with arguments. Either calls with args or not. Makes Pytest work.
It gets the arguments in the function definition using locals()
if withArgs is True, that means this is being called with command line args
so go to arg_parsing() to get those args
we then update locals() with the new command line args and remove "withArgs"
This function then calls call_encryption(**result) which passes our dict of args
to the function as its own arguments using dict unpacking.
Returns:
The output of the decryption.
"""
# if user wants to know where appdirs is
# print and exit
if "appdirs" in kwargs and kwargs["appdirs"]:
dirs = AppDirs("Ciphey", "Ciphey")
path_to_config = dirs.user_config_dir
print(
f"The settings.yml file should be at {</s>
===========changed ref 1===========
<s>",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
# offset: 1
<s> path_to_config = dirs.user_config_dir
print(
f"The settings.yml file should be at {os.path.join(path_to_config, 'settings.yml')}"
)
return None
# Now we create the config object
config = iface.Config()
# Load the settings file into the config
load_msg: str
cfg_arg = kwargs["config"]
if cfg_arg is None:
# Make sure that the config dir actually exists
os.makedirs(iface.Config.get_default_dir(), exist_ok=True)
config.load_file(create=True)
load_msg = f"Opened config file at {os.path.join(iface.Config.get_default_dir(), 'config.yml')}"
else:
config.load_file(cfg_arg)
load_msg = f"Opened config file at {cfg_arg}"
# Load the verbosity, so that we can start logging
verbosity = kwargs["verbose"]
quiet = kwargs["quiet"]
if verbosity is None:
if quiet is not None:
verbosity = -quiet
elif quiet is not None:
verbosity -= quiet
if kwargs["greppable"] is not None:
verbosity -= 999
# Use the existing value as a base
config.verbosity += verbosity
config.update_log_level(config.verbosity)
logger.debug(load_msg)</s>
===========changed ref 2===========
<s>",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
# offset: 2
<s> logger.trace(f"Got cmdline args {kwargs}")
# Now we load the modules
module_arg = kwargs["module"]
if module_arg is not None:
config.modules += list(module_arg)
# We need to load formats BEFORE we instantiate objects
if kwargs["bytes"] is not None:
config.update_format("bytes")
# Next, load the objects
params = kwargs["param"]
if params is not None:
for i in params:
key, value = i.split("=", 1)
parent, name = key.split(".", 1)
config.update_param(parent, name, value)
config.update("checker", kwargs["checker"])
config.update("searcher", kwargs["searcher"])
config.update("default_dist", kwargs["default_dist"])
config.complete_config()
logger.trace(f"Command line opts: {kwargs}")
logger.trace(f"Config finalised: {config}")
# Finally, we load the plaintext
if kwargs["text"] is None:
if kwargs["file"] is not None:
kwargs["text"] = kwargs["file"].read()
elif kwargs["text_stdin"] is not None:
kwargs["text"] = kwargs["text_stdin"]
else:
# else print help menu
print("[bold red]Error. No inputs were given to Ciphey.</s>
|
tests.test_main/test_braille
|
Modified
|
Ciphey~Ciphey
|
c4fca81f00db02320d8b626a348bf9af5ff08c8e
|
Fix Braille test
|
<2>:<add> "⠓⠑⠇⠇⠕⠀⠍⠽⠀⠝⠁⠍⠑⠀⠊⠎⠀⠃⠑⠑⠀⠁⠝⠙⠀⠊⠀⠇⠊⠅⠑⠀⠙⠕⠛⠀⠁⠝⠙⠀⠁⠏⠏⠇⠑⠀⠁⠝⠙⠀⠞⠗⠑⠑"
<del> "⠠⠓⠑⠇⠇⠕⠀⠍⠽⠀⠝⠁⠍⠑⠀⠊⠎⠀⠃⠑⠑⠀⠁⠝⠙⠀⠠⠊⠀⠇⠊⠅⠑⠀⠙⠕⠛⠀⠁⠝⠙⠀⠁⠏⠏⠇⠑⠀⠁⠝⠙⠀⠞⠗⠑⠑"
<4>:<add> assert res.lower() == answer_str.lower()
<del> assert res == answer_str
|
# module: tests.test_main
def test_braille():
<0> res = decrypt(
<1> Config.library_default().complete_config(),
<2> "⠠⠓⠑⠇⠇⠕⠀⠍⠽⠀⠝⠁⠍⠑⠀⠊⠎⠀⠃⠑⠑⠀⠁⠝⠙⠀⠠⠊⠀⠇⠊⠅⠑⠀⠙⠕⠛⠀⠁⠝⠙⠀⠁⠏⠏⠇⠑⠀⠁⠝⠙⠀⠞⠗⠑⠑"
<3> )
<4> assert res == answer_str
<5>
|
===========unchanged ref 0===========
at: ciphey.ciphey
decrypt(config: iface.Config, ctext: Any) -> Union[str, bytes]
at: ciphey.iface._config
Config()
at: ciphey.iface._config.Config
complete_config() -> "Config"
library_default()
at: tests.test_main
answer_str = "Hello my name is bee and I like dog and apple and tree"
|
ciphey.basemods.Decoders.braille/Braille.decode
|
Modified
|
Ciphey~Ciphey
|
b06e3a0dd66d7a06f8f1acaa9ff51ffd8ce60800
|
Clean up some things
|
<0>:<add> """
<add> Performs Braille decoding
<add> """
<add> logger.trace("Attempting Braille")
<add> ctext_decoded = ""
<2>:<add> if symbol in ctext:
<del> if symbol in text:
<7>:<add> logger.trace("Failed to decode Braille due to invalid characters")
<10>:<add> ctext = re.sub(pattern, value, ctext)
<del> text = re.sub(pattern, value, text)
<13>:<add> for word in ctext.split(" "):
<del> for word in text.split(' '):
<14>:<add> # If two commas are in front of a word, uppercase the word and remove the comma
<del> # if two commas are infront of word, uppercase word and remove comma
<15>:<add> if word[:2].find(",,") != -1:
<del> if (word[:2].find(',,') != -1):
<16>:<add> wordArr.append(word.replace(",,", "").upper())
<del> wordArr.append(word.replace(',,', '').upper())
<22>:<add> # If one comma is in front of a word, capitalize the word and remove the comma
<del> # if one comma is infront of word, capitalize word and remove comma
<23>:<add> if word[0].find(",") != -1:
<del> if (word[0].find(',') != -1):
<24>:<add> result.append(word.replace(",", "").capitalize())
<del> result.append(word.replace(',', '').capitalize())
<27>:<add> ctext_decoded = " ".join(result)
|
# module: ciphey.basemods.Decoders.braille
@registry.register
class Braille(Decoder[str]):
+ def decode(self, ctext: T) -> Optional[U]:
- def decode(self, text: T) -> Optional[U]:
<0> braille_matches = 0
<1> for symbol in self.BRAILLE_DICT_INV.values():
<2> if symbol in text:
<3> braille_matches += 1
<4> else:
<5> continue
<6> if braille_matches == 0:
<7> return None
<8>
<9> for pattern, value in self.BRAILLE_DICT.items():
<10> text = re.sub(pattern, value, text)
<11>
<12> wordArr = []
<13> for word in text.split(' '):
<14> # if two commas are infront of word, uppercase word and remove comma
<15> if (word[:2].find(',,') != -1):
<16> wordArr.append(word.replace(',,', '').upper())
<17> else:
<18> wordArr.append(word)
<19>
<20> result = []
<21> for word in wordArr:
<22> # if one comma is infront of word, capitalize word and remove comma
<23> if (word[0].find(',') != -1):
<24> result.append(word.replace(',', '').capitalize())
<25> else:
<26> result.append(word)
<27>
<28> return ' '.join(result)
<29>
|
===========unchanged ref 0===========
at: ciphey.basemods.Decoders.braille.Braille.__init__
self.BRAILLE_DICT = config.get_resource(self._params()["dict"], Translation)
self.BRAILLE_DICT_INV = {v: k for k, v in self.BRAILLE_DICT.items()}
at: re
sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
|
ciphey.basemods.Decoders.braille/Braille.getParams
|
Modified
|
Ciphey~Ciphey
|
b06e3a0dd66d7a06f8f1acaa9ff51ffd8ce60800
|
Clean up some things
|
<2>:<add> desc="The Braille dictionary to use",
<del> desc="The braille dictionary to use",
|
# module: ciphey.basemods.Decoders.braille
@registry.register
class Braille(Decoder[str]):
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
<0> return {
<1> "dict": ParamSpec(
<2> desc="The braille dictionary to use",
<3> req=False,
<4> default="cipheydists::translate::braille",
<5> )
<6> }
<7>
|
===========unchanged ref 0===========
at: ciphey.iface._config
Config()
at: ciphey.iface._config.Config
get_resource(res_name: str, t: Optional[Type]=None) -> Any
at: ciphey.iface._modules
ParamSpec(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
Translation = Dict[str, str]
at: ciphey.iface._modules.ConfigurableModule
getParams() -> Optional[Dict[str, ParamSpec]]
_params()
at: ciphey.iface._modules.Decoder
__init__(config: Config)
__init__(self, config: Config)
at: typing
Dict = _alias(dict, 2, inst=False, name='Dict')
===========changed ref 0===========
# module: ciphey.basemods.Decoders.braille
@registry.register
class Braille(Decoder[str]):
+ def decode(self, ctext: T) -> Optional[U]:
- def decode(self, text: T) -> Optional[U]:
+ """
+ Performs Braille decoding
+ """
+ logger.trace("Attempting Braille")
+ ctext_decoded = ""
braille_matches = 0
for symbol in self.BRAILLE_DICT_INV.values():
+ if symbol in ctext:
- if symbol in text:
braille_matches += 1
else:
continue
if braille_matches == 0:
+ logger.trace("Failed to decode Braille due to invalid characters")
return None
for pattern, value in self.BRAILLE_DICT.items():
+ ctext = re.sub(pattern, value, ctext)
- text = re.sub(pattern, value, text)
wordArr = []
+ for word in ctext.split(" "):
- for word in text.split(' '):
+ # If two commas are in front of a word, uppercase the word and remove the comma
- # if two commas are infront of word, uppercase word and remove comma
+ if word[:2].find(",,") != -1:
- if (word[:2].find(',,') != -1):
+ wordArr.append(word.replace(",,", "").upper())
- wordArr.append(word.replace(',,', '').upper())
else:
wordArr.append(word)
result = []
for word in wordArr:
+ # If one comma is in front of a word, capitalize the word and remove the comma
- # if one comma is infront of word, capitalize word and remove comma
+ if word[0].find(",") != -1:
- if (word[0].find(',') != -1):
+ result.append(word.replace(",", "").capitalize())
- result.</s>
===========changed ref 1===========
# module: ciphey.basemods.Decoders.braille
@registry.register
class Braille(Decoder[str]):
+ def decode(self, ctext: T) -> Optional[U]:
- def decode(self, text: T) -> Optional[U]:
# offset: 1
<s>',') != -1):
+ result.append(word.replace(",", "").capitalize())
- result.append(word.replace(',', '').capitalize())
else:
result.append(word)
+ ctext_decoded = " ".join(result)
+ logger.debug(f"Braille successful, returning '{ctext_decoded}'")
+ return ctext_decoded
- return ' '.join(result)
-
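The capitalisation handling above is easiest to see on a toy translation table; the real decoder loads the full "cipheydists::translate::braille" resource instead. Note that indexing `word[0]` as in the original would raise on an empty word (two consecutive spaces), so this sketch uses `startswith`.

import re

# Toy subset of a Braille translation table (illustration only).
BRAILLE = {"⠓": "h", "⠑": "e", "⠇": "l", "⠕": "o", "⠀": " ", "⠠": ","}

def decode_braille(ctext: str) -> str:
    for pattern, value in BRAILLE.items():
        ctext = re.sub(pattern, value, ctext)
    words = []
    for word in ctext.split(" "):
        if word.startswith(",,"):      # ",," marks an all-caps word
            words.append(word[2:].upper())
        elif word.startswith(","):     # "," marks a capitalised word
            words.append(word[1:].capitalize())
        else:
            words.append(word)
    return " ".join(words)

print(decode_braille("⠠⠓⠑⠇⠇⠕"))  # Hello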
|
ciphey.ciphey/main
|
Modified
|
Ciphey~Ciphey
|
2bc622172719416addf647bb419006368de919b4
|
Revert changes (#547)
|
<s> module from the given path",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
<0> """Ciphey - Automated Decryption Tool
<1>
<2> Documentation:
<3> https://github.com/Ciphey/Ciphey/wiki\n
<4> Discord (support here, we're online most of the day):
<5> https://discord.ciphey.online/\n
<6> GitHub:
<7> https://github.com/ciphey/ciphey\n
<8>
<9> Ciphey is an automated decryption tool using smart artificial intelligence and natural language processing. Input encrypted text, get the decrypted text back.
<10>
<11> Examples:\n
<12> Basic Usage: ciphey -t "aGVsbG8gbXkgbmFtZSBpcyBiZWU="
<13>
<14> """
<15>
<16> """Function to deal with arguments. Either calls with args or not. Makes Pytest work.
<17>
<18> It gets the arguments in the function definition using locals()
<19> if withArgs is True, that means this is being called with command line args
<20> so go to arg_parsing() to get those args
<21> we then update locals() with the new command line args and remove "withArgs"
<22> This function then calls call_encryption(**result) which passes our dict of args
<23> to the function as its own arguments using dict unpacking.
<24> Returns:
<25> The output of the decryption.
<26> """
<27>
<28> # if user wants to know where appdirs is
<29> # print and exit
<30> if "appdirs" in kwargs and kwargs["appdirs"]:
<31> dirs = AppDirs("Ciphey", "Ciphey")</s>
|
===========below chunk 0===========
<s>",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
# offset: 1
print(
f"The settings.yml file should be at {os.path.join(path_to_config, 'settings.yml')}"
)
return None
# Now we create the config object
config = iface.Config()
# Load the settings file into the config
load_msg: str
cfg_arg = kwargs["config"]
if cfg_arg is None:
# Make sure that the config dir actually exists
os.makedirs(iface.Config.get_default_dir(), exist_ok=True)
config.load_file(create=True)
load_msg = f"Opened config file at {os.path.join(iface.Config.get_default_dir(), 'config.yml')}"
else:
config.load_file(cfg_arg)
load_msg = f"Opened config file at {cfg_arg}"
# Load the verbosity, so that we can start logging
verbosity = kwargs["verbose"]
quiet = kwargs["quiet"]
if verbosity is None:
if quiet is not None:
verbosity = -quiet
elif quiet is not None:
verbosity -= quiet
if kwargs["greppable"] is not None:
verbosity -= 999
# Use the existing value as a base
config.verbosity += verbosity
config.update_log_level(config.verbosity)
logger.debug(load_msg)
logger.trace(f"Got cmdline args {kwargs}")
# Now we load the modules
module_arg = kwargs["module"]
if module_arg is not None:
config</s>
===========below chunk 1===========
<s>",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
# offset: 2
<s> # Now we load the modules
module_arg = kwargs["module"]
if module_arg is not None:
config.modules += list(module_arg)
# We need to load formats BEFORE we instantiate objects
if kwargs["bytes"] is not None:
config.update_format("bytes")
# Next, load the objects
params = kwargs["param"]
if params is not None:
for i in params:
key, value = i.split("=", 1)
parent, name = key.split(".", 1)
config.update_param(parent, name, value)
config.update("checker", kwargs["checker"])
config.update("searcher", kwargs["searcher"])
config.update("default_dist", kwargs["default_dist"])
config.complete_config()
logger.trace(f"Command line opts: {kwargs}")
logger.trace(f"Config finalised: {config}")
# Finally, we load the plaintext
if kwargs["text"] is None:
if kwargs["file"] is not None:
kwargs["text"] = kwargs["file"].read()
elif kwargs["text_stdin"] is not None:
kwargs["text"] = kwargs["text_stdin"]
else:
# else print help menu
print("[bold red]Error. No inputs were given to Ciphey. [bold red]")
@click.pass_context
</s>
===========below chunk 2===========
<s>",
type=click.Path(),
multiple=True,
)
@click.option(
"-A",
"--appdirs",
help="Print the location of where Ciphey wants the settings file to be",
type=bool,
is_flag=True,
)
@click.option("-f", "--file", type=click.File("rb"), required=False)
@click.argument("text_stdin", callback=get_name, required=False)
def main(**kwargs):
# offset: 3
<s> all_procedure(ctx):
print_help(ctx)
all_procedure()
return None
if issubclass(config.objs["format"], type(kwargs["text"])):
pass
elif config.objs["format"] == str and isinstance(kwargs["text"], bytes):
kwargs["text"] = kwargs["text"].decode("utf-8")
elif config.objs["format"] == bytes and isinstance(kwargs["text"], str):
kwargs["text"] = kwargs["text"].encode("utf-8")
else:
raise TypeError(f"Cannot load type {config.format} from {type(kwargs['text'])}")
result: Optional[str]
# if debug or quiet mode is on, run without spinner
try:
if config.verbosity != 0:
result = decrypt(config, kwargs["text"])
else:
# else, run with spinner if verbosity is 0
with yaspin.yaspin(Spinners.earth, "Thinking") as sp:
config.set_spinner(sp)
result = decrypt(config, kwargs["text"])
if result is None:
result = "Could not find any solutions."
print(result)
except KeyboardInterrupt:
sys.exit()
===========unchanged ref 0===========
at: ciphey.ciphey
get_name(ctx, param, value)
at: click.decorators
argument(*param_decls: Text, cls: Type[Argument]=..., required: Optional[bool]=..., type: Optional[_ConvertibleType]=..., default: Optional[Any]=..., callback: Optional[_Callback]=..., nargs: Optional[int]=..., metavar: Optional[str]=..., expose_value: bool=..., is_eager: bool=..., envvar: Optional[Union[str, List[str]]]=..., autocompletion: Optional[Callable[[Any, List[str], str], List[Union[str, Tuple[str, str]]]]]=...) -> _IdentityFunction
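The verbosity handling in the reverted `main` compresses into a few lines; this standalone sketch assumes a base verbosity of 0 and treats --greppable as a boolean, which approximates the `is not None` checks in the original.

from typing import Optional

def resolve_verbosity(verbose: Optional[int], quiet: Optional[int], greppable: bool) -> int:
    verbosity = 0
    if verbose is None:
        if quiet is not None:
            verbosity = -quiet
    else:
        verbosity = verbose - (quiet or 0)
    if greppable:
        verbosity -= 999  # effectively silences all logging
    return verbosity

print(resolve_verbosity(verbose=None, quiet=2, greppable=False))  # -2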
|
|
noxfile/safety
|
Modified
|
Ciphey~Ciphey
|
a8e1e9579c5fc2c736f6227b37e6a05ec78ed0e2
|
removed --no-hashes from CI
|
<5>:<del> "--without-hashes",
|
# module: noxfile
@nox.session
def safety(session):
<0> session.run(
<1> "poetry",
<2> "export",
<3> "--dev",
<4> "--format=requirements.txt",
<5> "--without-hashes",
<6> "--output=requirements.txt",
<7> external=True,
<8> )
<9> install_with_constraints(session, "safety")
<10> session.run("safety", "check", "--file=requirements.txt", "--full-report")
<11>
|
===========unchanged ref 0===========
at: noxfile
install_with_constraints(session: Session, *args: str, **kwargs: Any) -> None
|
noxfile/safety
|
Modified
|
Ciphey~Ciphey
|
f42775f8819af04ca57e8baa4a0bfdf6184c94c5
|
nox works locally
|
<9>:<del> session.run("safety", "check", "--file=requirements.txt", "--full-report")
|
# module: noxfile
@nox.session
def safety(session):
<0> session.run(
<1> "poetry",
<2> "export",
<3> "--dev",
<4> "--format=requirements.txt",
<5> "--output=requirements.txt",
<6> external=True,
<7> )
<8> install_with_constraints(session, "safety")
<9> session.run("safety", "check", "--file=requirements.txt", "--full-report")
<10>
|
===========unchanged ref 0===========
at: noxfile
install_with_constraints(session: Session, *args: str, **kwargs: Any) -> None
|
noxfile/safety
|
Modified
|
Ciphey~Ciphey
|
d348b42490e4dfb57b565030ccd4f0bdb6adc082
|
really removing safety
|
<0>:<del> session.run(
<1>:<del> "poetry",
<2>:<del> "export",
<3>:<del> "--dev",
<4>:<del> "--format=requirements.txt",
<5>:<del> "--output=requirements.txt",
<6>:<del> external=True,
<7>:<del> )
<8>:<del> install_with_constraints(session, "safety")
<9>:<add> None
|
# module: noxfile
@nox.session
def safety(session):
<0> session.run(
<1> "poetry",
<2> "export",
<3> "--dev",
<4> "--format=requirements.txt",
<5> "--output=requirements.txt",
<6> external=True,
<7> )
<8> install_with_constraints(session, "safety")
<9>
|
===========unchanged ref 0===========
at: noxfile
install_with_constraints(session: Session, *args: str, **kwargs: Any) -> None
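The three commits above walk the safety session back step by step: first the --without-hashes flag goes, then the check itself, then the export. The end state, with a bare `None` expression as the body, presumably keeps the session registered so existing invocations still succeed while doing nothing:

import nox

@nox.session
def safety(session):
    # Placeholder body: the session remains registered but runs no checks.
    None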
|
ciphey.basemods.Searchers.ausearch/convert_edge_info
|
Modified
|
Ciphey~Ciphey
|
f7d21ce0993eeff0b53cec8717dfbd8f8419f8f5
|
Bee fix ausearch (#603)
|
<0>:<del> return cipheycore.ausearch_edge(
<1>:<del> info.success_likelihood, info.success_runtime, info.failure_runtime
<2>:<del> )
<3>:<add> return 1
|
# module: ciphey.basemods.Searchers.ausearch
def convert_edge_info(info: CrackInfo):
<0> return cipheycore.ausearch_edge(
<1> info.success_likelihood, info.success_runtime, info.failure_runtime
<2> )
<3>
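Stubbing `convert_edge_info` to a constant 1 drops the cipheycore cost model entirely. For intuition only, one plausible scalar weight in that spirit (an assumption here, not cipheycore's actual formula) is the expected runtime of attempting a cracker:

from dataclasses import dataclass

@dataclass
class CrackInfo:
    success_likelihood: float  # p, in [0, 1]
    success_runtime: float     # seconds when the crack works
    failure_runtime: float     # seconds wasted when it does not

def expected_cost(info: CrackInfo) -> float:
    p = info.success_likelihood
    return p * info.success_runtime + (1 - p) * info.failure_runtime

print(expected_cost(CrackInfo(0.9, 0.1, 2.0)))  # ~0.29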
| |
ciphey.basemods.Searchers.ausearch/AuSearch.expand_crackers
|
Modified
|
Ciphey~Ciphey
|
f7d21ce0993eeff0b53cec8717dfbd8f8419f8f5
|
Bee fix ausearch (#603)
|
<9>:<add> Edge(source=node, route=inst)
<del> Edge(source=node, route=inst, info=convert_edge_info(inst.getInfo(res)))
|
# module: ciphey.basemods.Searchers.ausearch
@registry.register
class AuSearch(Searcher):
# def expand(self, edge: Edge) -> List[Edge]:
# """Evaluates the destination of the given, and adds its child edges to the pool"""
# edge.dest = Node(parent=edge, level=edge.route(edge.source.level.result.value))
def expand_crackers(self, node: Node) -> None:
<0> if node.depth >= self.max_cipher_depth:
<1> return
<2>
<3> res = node.level.result.value
<4> additional_work = []
<5>
<6> for i in self.get_crackers_for(type(res)):
<7> inst = self._config()(i)
<8> additional_work.append(
<9> Edge(source=node, route=inst, info=convert_edge_info(inst.getInfo(res)))
<10> )
<11> priority = min(node.depth, self.priority_cap)
<12> if self.invert_priority:
<13> priority = -priority
<14>
<15> self.work.add_work(priority, additional_work)
<16>
|
===========changed ref 0===========
# module: ciphey.basemods.Searchers.ausearch
def convert_edge_info(info: CrackInfo):
- return cipheycore.ausearch_edge(
- info.success_likelihood, info.success_runtime, info.failure_runtime
- )
+ return 1
===========changed ref 1===========
# module: ciphey.basemods.Searchers.ausearch
@dataclass
class Edge:
source: Node
route: Union[Cracker, Decoder]
dest: Optional[Node] = None
- # Info is not filled in for Decoders
- info: Optional[cipheycore.ausearch_edge] = None
|
ciphey.basemods.Searchers.ausearch/AuSearch.search
|
Modified
|
Ciphey~Ciphey
|
f7d21ce0993eeff0b53cec8717dfbd8f8419f8f5
|
Bee fix ausearch (#603)
|
<21>:<del> infos = [i.info for i in chunk]
<34>:<del> logger.trace(f"{len(infos)} remaining on this level")
<35>:<del> step_res = cipheycore.ausearch_minimise(infos)
<36>:<add> step_res = 0.1
<add> edge: Edge = chunk.pop(0)
<del> edge: Edge = chunk.pop(step_res.index)
<38>:<add> f"Weight is currently {0} "
<del> f"Weight is currently {step_res.weight} "
<42>:<del> del infos[
|
# module: ciphey.basemods.Searchers.ausearch
@registry.register
class AuSearch(Searcher):
def search(self, ctext: Any) -> Optional[SearchResult]:
<0> logger.trace(
<1> f"""Beginning AuSearch with {"inverted" if self.invert_priority else "normal"} priority"""
<2> )
<3>
<4> try:
<5> root = Node.root(self._config(), ctext)
<6> except DuplicateNode:
<7> return None
<8>
<9> check_res = self._config().objs["checker"](ctext)
<10> if check_res is not None:
<11> return SearchResult(check_res=check_res, path=[root.level])
<12>
<13> try:
<14> self.recursive_expand(root, False)
<15>
<16> while True:
<17> if self.work.empty():
<18> break
<19> # Get the highest level result
<20> chunk = self.work.get_work_chunk()
<21> infos = [i.info for i in chunk]
<22> # Work through all of this level's results
<23> while len(chunk) != 0:
<24> max_depth = 0
<25> for i in chunk:
<26> if i.source.depth > max_depth:
<27> max_depth = i.source.depth
<28> logger.debug(f"At depth {chunk[0].source.depth}")
<29>
<30> # if self.disable_priority:
<31> # chunk += self.work.get_work_chunk()
<32> # infos = [i.info for i in chunk]
<33>
<34> logger.trace(f"{len(infos)} remaining on this level")
<35> step_res = cipheycore.ausearch_minimise(infos)
<36> edge: Edge = chunk.pop(step_res.index)
<37> logger.trace(
<38> f"Weight is currently {step_res.weight} "
<39> f"when we pick {type(edge.route).__name__.lower()} "
<40> f"with depth {edge.source.depth}"
<41> )
<42> del infos[</s>
|
===========below chunk 0===========
# module: ciphey.basemods.Searchers.ausearch
@registry.register
class AuSearch(Searcher):
def search(self, ctext: Any) -> Optional[SearchResult]:
# offset: 1
# Expand the node
res = edge.route(edge.source.level.result.value)
if res is None:
continue
for i in res:
try:
node = Node.cracker(
config=self._config(), edge_template=edge, result=i
)
self.recursive_expand(node)
except DuplicateNode:
continue
except AuSearchSuccessful as e:
logger.debug("AuSearch succeeded")
return SearchResult(path=e.target.get_path(), check_res=e.info)
logger.debug("AuSearch failed")
===========changed ref 0===========
# module: ciphey.basemods.Searchers.ausearch
def convert_edge_info(info: CrackInfo):
- return cipheycore.ausearch_edge(
- info.success_likelihood, info.success_runtime, info.failure_runtime
- )
+ return 1
===========changed ref 1===========
# module: ciphey.basemods.Searchers.ausearch
@dataclass
class Edge:
source: Node
route: Union[Cracker, Decoder]
dest: Optional[Node] = None
- # Info is not filled in for Decoders
- info: Optional[cipheycore.ausearch_edge] = None
===========changed ref 2===========
# module: ciphey.basemods.Searchers.ausearch
@registry.register
class AuSearch(Searcher):
# def expand(self, edge: Edge) -> List[Edge]:
# """Evaluates the destination of the given, and adds its child edges to the pool"""
# edge.dest = Node(parent=edge, level=edge.route(edge.source.level.result.value))
def expand_crackers(self, node: Node) -> None:
if node.depth >= self.max_cipher_depth:
return
res = node.level.result.value
additional_work = []
for i in self.get_crackers_for(type(res)):
inst = self._config()(i)
additional_work.append(
+ Edge(source=node, route=inst)
- Edge(source=node, route=inst, info=convert_edge_info(inst.getInfo(res)))
)
priority = min(node.depth, self.priority_cap)
if self.invert_priority:
priority = -priority
self.work.add_work(priority, additional_work)
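The search loop drives a priority work pool through `add_work(priority, items)` and `get_work_chunk()`, and with `ausearch_minimise` gone each chunk is simply drained front to back via `chunk.pop(0)`. The pool class itself is not shown in these diffs, so the following is a hypothetical reconstruction consistent with those call sites:

import heapq
from typing import Any, List, Tuple

class WorkPool:
    """Hypothetical reconstruction based on how the pool is called above."""

    def __init__(self) -> None:
        self._heap: List[Tuple[int, int, Any]] = []
        self._counter = 0  # tie-breaker preserves insertion order per priority

    def add_work(self, priority: int, items: List[Any]) -> None:
        for item in items:
            heapq.heappush(self._heap, (priority, self._counter, item))
            self._counter += 1

    def empty(self) -> bool:
        return not self._heap

    def get_work_chunk(self) -> List[Any]:
        # Drain every entry sharing the current best priority.
        if self.empty():
            return []
        best = self._heap[0][0]
        chunk = []
        while self._heap and self._heap[0][0] == best:
            chunk.append(heapq.heappop(self._heap)[2])
        return chunk

pool = WorkPool()
pool.add_work(0, ["decode:base64", "crack:caesar"])
pool.add_work(1, ["crack:vigenere"])
print(pool.get_work_chunk())  # ['decode:base64', 'crack:caesar']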
|
seq2seq.run_seq2seq_flax/DataTrainingArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
a104edb2b44328783505607f6348e24619715d31
|
accept tsv files as inputs.
|
<5>:<add> assert extension in ["tsv", "csv", "json"], "`train_file` should be a tsv, csv or json file."
<del> assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
<8>:<add> assert extension in ["tsv", "csv", "json"], "`validation_file` should be a tsv, csv or json file."
<del> assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
|
# module: seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
def __post_init__(self):
<0> if self.dataset_name is None and self.train_file is None and self.validation_file is None:
<1> raise ValueError("Need either a dataset name or a training/validation file.")
<2> else:
<3> if self.train_file is not None:
<4> extension = self.train_file.split(".")[-1]
<5> assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
<6> if self.validation_file is not None:
<7> extension = self.validation_file.split(".")[-1]
<8> assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
<9> if self.val_max_target_length is None:
<10> self.val_max_target_length = self.max_target_length
<11>
|
===========unchanged ref 0===========
at: seq2seq.run_seq2seq_flax.DataTrainingArguments
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
text_column: Optional[str] = field(
default='caption',
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
encoding_column: Optional[str] = field(
default='encoding',
metadata={"help": "The name of the column in the datasets containing the image encodings."},
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input predict data file to do prediction on (a text file)."},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_target_length: Optional[int] = field(
default=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
===========unchanged ref 1===========
val_max_target_length: Optional[int] = field(
default=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the `max_length` param of `model.generate`, which is used "
"during evaluation."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
source_prefix: Optional[str] = field(
default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
)
predict_with_generate: bool = field(
default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to `model.generate`, "
"which is used during evaluation."
},
)
===========unchanged ref 2===========
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
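The extension check in `__post_init__` is purely name-based; a standalone version of the validation added for tsv support:

def validate_data_file(path: str, arg_name: str) -> None:
    extension = path.split(".")[-1]
    assert extension in ["tsv", "csv", "json"], (
        f"`{arg_name}` should be a tsv, csv or json file."
    )

validate_data_file("captions.tsv", "train_file")    # passes
# validate_data_file("captions.txt", "train_file")  # would raise AssertionError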
|
seq2seq.run_seq2seq_flax/CustomFlaxBartModule.setup
|
Modified
|
borisdayma~dalle-mini
|
a841a4ce3b23dc2e3ba0bbbca99b0ca9e548b3c2
|
Decoder: set eos to an unreachable value, set min_length=max_length to force generation with the required number of tokens.
|
<19>:<add> decoder_config.min_length = OUTPUT_LENGTH
<add> decoder_config.max_length = OUTPUT_LENGTH
|
# module: seq2seq.run_seq2seq_flax
class CustomFlaxBartModule(FlaxBartModule):
def setup(self):
<0> # we keep shared to easily load pre-trained weights
<1> self.shared = nn.Embed(
<2> self.config.vocab_size,
<3> self.config.d_model,
<4> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<5> dtype=self.dtype,
<6> )
<7> # a separate embedding is used for the decoder
<8> self.decoder_embed = nn.Embed(
<9> OUTPUT_VOCAB_SIZE,
<10> self.config.d_model,
<11> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<12> dtype=self.dtype,
<13> )
<14> self.encoder = FlaxBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
<15>
<16> # the decoder has a different config
<17> decoder_config = BartConfig(self.config.to_dict())
<18> decoder_config.max_position_embeddings = OUTPUT_LENGTH
<19> decoder_config.vocab_size = OUTPUT_VOCAB_SIZE
<20> self.decoder = FlaxBartDecoder(decoder_config, dtype=self.dtype, embed_tokens=self.decoder_embed)
<21>
|
===========unchanged ref 0===========
at: seq2seq.run_seq2seq_flax
OUTPUT_VOCAB_SIZE = 16384 + 1 # encoded image token space + 1 for bos
OUTPUT_LENGTH = 256 + 1 # number of encoded tokens + 1 for bos
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
to_dict() -> Dict[str, Any]
at: transformers.configuration_utils.PretrainedConfig.__init__
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
at: transformers.models.bart.configuration_bart
BartConfig(vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.init_std = init_std
===========unchanged ref 1===========
at: transformers.models.bart.modeling_flax_bart.FlaxBartModule
config: BartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
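The generation trick described in the commit message is worth spelling out: give the decoder an EOS id it can never emit and pin min_length to max_length, so `generate()` must produce exactly OUTPUT_LENGTH tokens (BOS plus 256 image codes). A sketch of the config side, assuming a stock BartConfig:

from transformers import BartConfig

OUTPUT_VOCAB_SIZE = 16384 + 1  # image codebook + 1 for bos
OUTPUT_LENGTH = 256 + 1        # encoded tokens + 1 for bos

decoder_config = BartConfig(vocab_size=OUTPUT_VOCAB_SIZE)
decoder_config.min_length = OUTPUT_LENGTH
decoder_config.max_length = OUTPUT_LENGTH
# An eos id outside the vocab can never be sampled, so decoding stops only
# when max_length is reached, yielding a fixed-length image-code sequence.
decoder_config.eos_token_id = OUTPUT_VOCAB_SIZE
print(decoder_config.min_length, decoder_config.max_length)  # 257 257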
|
seq2seq.run_seq2seq_flax/write_metric
|
Modified
|
borisdayma~dalle-mini
|
498559f075c4d96bc0b8f3dc839283eab0e2ffa2
|
feat: log metrics more frequently
|
<4>:<add> tag = f"train_epoch/{key}"
<del> tag = f"train_{key}"
<9>:<add> summary_writer.scalar(f"eval/{metric_name}", value, step)
<del> summary_writer.scalar(f"eval_{metric_name}", value, step)
|
# module: seq2seq.run_seq2seq_flax
def write_metric(summary_writer, train_metrics, eval_metrics, train_time, step):
<0> summary_writer.scalar("train_time", train_time, step)
<1>
<2> train_metrics = get_metrics(train_metrics)
<3> for key, vals in train_metrics.items():
<4> tag = f"train_{key}"
<5> for i, val in enumerate(vals):
<6> summary_writer.scalar(tag, val, step - len(vals) + i + 1)
<7>
<8> for metric_name, value in eval_metrics.items():
<9> summary_writer.scalar(f"eval_{metric_name}", value, step)
<10>
|
===========changed ref 0===========
# module: seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
text_column: Optional[str] = field(
default='caption',
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
encoding_column: Optional[str] = field(
default='encoding',
metadata={"help": "The name of the column in the datasets containing the image encodings."},
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input predict data file to do prediction on (a text file)."},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_target_length: Optional[int] = field(
default=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int</s>
===========changed ref 1===========
# module: seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
# offset: 1
<s> this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the `max_length` param of `model.generate`, which is used "
"during evaluation."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
source_prefix: Optional[str] = field(
default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
)
predict_with_generate: bool = field(
default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, B</s>
===========changed ref 2===========
# module: seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
# offset: 2
<s>)."}
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to `model.generate`, "
"which is used during evaluation."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
+ log_interval: Optional[int] = field(
+ default=500,
+ metadata={
+ "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
+ "value if set."
+ },
+ )
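The index arithmetic in `write_metric` back-fills one scalar per optimizer step: `step - len(vals) + i + 1` places the i-th value at the step where it was produced. Worked numbers, with made-up values:

vals = [0.9, 0.8, 0.7]  # e.g. one loss value per step this epoch
step = 300              # global step when the epoch ends

for i, val in enumerate(vals):
    print("train_epoch/loss", val, step - len(vals) + i + 1)
# train_epoch/loss 0.9 298
# train_epoch/loss 0.8 299
# train_epoch/loss 0.7 300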
|
seq2seq.run_seq2seq_flax/create_learning_rate_fn
|
Modified
|
borisdayma~dalle-mini
|
5a3211fc16e2f37c6d46c1dbd44be4da4cbbe8a7
|
feat: no decay option
|
<4>:<add> if no_decay:
<add> return warmup_fn
|
# module: seq2seq.run_seq2seq_flax
def create_learning_rate_fn(
+ train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float, no_decay: bool
- train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.array]:
<0> """Returns a linear warmup, linear_decay learning rate function."""
<1> steps_per_epoch = train_ds_size // train_batch_size
<2> num_train_steps = steps_per_epoch * num_train_epochs
<3> warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
<4> decay_fn = optax.linear_schedule(
<5> init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps
<6> )
<7> schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])
<8> return schedule_fn
<9>
|
===========unchanged ref 0===========
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
===========changed ref 0===========
# module: seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
text_column: Optional[str] = field(
default='caption',
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
encoding_column: Optional[str] = field(
default='encoding',
metadata={"help": "The name of the column in the datasets containing the image encodings."},
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input predict data file to do prediction on (a text file)."},
)
max_source_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
+ )
+ no_decay: bool = field(
+ default=False, metadata={"help": "Whether to use decay in the learning rate scheduler."}
)
max_target_length: Optional[int] = field(
default=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for target text after tokenization.</s>
===========changed ref 1===========
# module: seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
# offset: 1
<s>
default=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the `max_length` param of `model.generate`, which is used "
"during evaluation."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
preprocessing_num_workers: Optional[int] = field(
default=80, # ensure we have the same datasets cached data and avoid using too much space
metadata={"help": "The number of processes to use for the preprocessing."},
)
source_prefix: Optional[str] = field(
default=None, metadata={"help": "A prefix to add before</s>
===========changed ref 2===========
# module: seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
# offset: 2
<s> text (useful for T5 models)."}
)
predict_with_generate: bool = field(
default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to `model.generate`, "
"which is used during evaluation."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
log_interval: Optional[int] = field(
default=40,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
log_model: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
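The `no_decay` branch works because `optax.linear_schedule` clamps its count at `transition_steps`, so returning the warmup function alone yields warmup followed by a constant rate. A tiny numeric check with made-up step counts:

import optax

lr = 1e-3
warmup_fn = optax.linear_schedule(init_value=0.0, end_value=lr, transition_steps=100)
decay_fn = optax.linear_schedule(init_value=lr, end_value=0.0, transition_steps=900)
schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[100])

print(warmup_fn(200))    # 0.001: clamped past step 100, i.e. no decay
print(schedule_fn(550))  # 0.0005: halfway through the 900-step decay leg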
|
seq2seq.run_seq2seq_flax/wandb_log
|
Modified
|
borisdayma~dalle-mini
|
3fef9c163364c8b97c5c468c72f8119d01edefd0
|
fix: log correct metrics
|
<3>:<add> log_metrics = {**log_metrics, 'train/step': step}
<del> log_metrics = {**metrics, 'train/step': step}
|
# module: seq2seq.run_seq2seq_flax
def wandb_log(metrics, step=None, prefix=None):
<0> if jax.process_index() == 0:
<1> log_metrics = {f'{prefix}/k' if prefix is not None else k: jax.device_get(v) for k,v in metrics.items()}
<2> if step is not None:
<3> log_metrics = {**metrics, 'train/step': step}
<4> wandb.log(log_metrics)
<5>
|
===========unchanged ref 0===========
at: wandb
log = _preinit.PreInitCallable("wandb.log", wandb_sdk.wandb_run.Run.log)
|
seq2seq.run_seq2seq_flax/wandb_log
|
Modified
|
borisdayma~dalle-mini
|
b20769d344a3d811b2f80fb1dfc292f4eb3021a6
|
fix: use correct key
|
<1>:<add> log_metrics = {f'{prefix}/{k}' if prefix is not None else k: jax.device_get(v) for k,v in metrics.items()}
<del> log_metrics = {f'{prefix}/k' if prefix is not None else k: jax.device_get(v) for k,v in metrics.items()}
|
# module: seq2seq.run_seq2seq_flax
def wandb_log(metrics, step=None, prefix=None):
<0> if jax.process_index() == 0:
<1> log_metrics = {f'{prefix}/k' if prefix is not None else k: jax.device_get(v) for k,v in metrics.items()}
<2> if step is not None:
<3> log_metrics = {**log_metrics, 'train/step': step}
<4> wandb.log(log_metrics)
<5>
|
===========unchanged ref 0===========
at: wandb
log = _preinit.PreInitCallable("wandb.log", wandb_sdk.wandb_run.Run.log)
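Both wandb_log fixes above are easy to miss in review: `f'{prefix}/k'` writes the literal key "train/k" so every metric collides, and building the step dict from `metrics` instead of `log_metrics` throws the prefixing away. The first bug, demonstrated:

metrics = {"loss": 0.5, "lr": 1e-4}
prefix = "train"

bad = {f"{prefix}/k": v for k, v in metrics.items()}
good = {f"{prefix}/{k}": v for k, v in metrics.items()}

print(bad)   # {'train/k': 0.0001}: keys collided, only the last value survives
print(good)  # {'train/loss': 0.5, 'train/lr': 0.0001}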
|
seq2seq.run_seq2seq_flax/wandb_log
|
Modified
|
borisdayma~dalle-mini
|
eb591ffcd6858a807576b1cf6e5753651986108b
|
Merge pull request #23 from khalidsaifullaah/main
|
<3>:<add> log_metrics['train/step'] = step
<del> log_metrics = {**log_metrics, 'train/step': step}
|
# module: seq2seq.run_seq2seq_flax
def wandb_log(metrics, step=None, prefix=None):
<0> if jax.process_index() == 0:
<1> log_metrics = {f'{prefix}/{k}' if prefix is not None else k: jax.device_get(v) for k,v in metrics.items()}
<2> if step is not None:
<3> log_metrics = {**log_metrics, 'train/step': step}
<4> wandb.log(log_metrics)
<5>
|
===========unchanged ref 0===========
at: seq2seq.run_seq2seq_flax.create_learning_rate_fn
num_train_steps = steps_per_epoch * num_train_epochs
warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
decay_fn = optax.linear_schedule(
init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps
)
===========changed ref 0===========
# module: seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
text_column: Optional[str] = field(
default='caption',
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
encoding_column: Optional[str] = field(
default='encoding',
metadata={"help": "The name of the column in the datasets containing the image encodings."},
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input predict data file to do prediction on (a text file)."},
)
max_source_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
no_decay: bool = field(
default=False, metadata={"help": "Whether to use decay in the learning rate scheduler."}
)
max_target_length: Optional[int] = field(
default=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer</s>
===========changed ref 1===========
# module: seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
# offset: 1
<s>=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the `max_length` param of `model.generate`, which is used "
"during evaluation."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
preprocessing_num_workers: Optional[int] = field(
default=80, # ensure we have the same datasets cached data and avoid using too much space
metadata={"help": "The number of processes to use for the preprocessing."},
)
source_prefix: Optional[str] = field(
default=None, metadata={"help": "A prefix to add before every source text</s>
===========changed ref 2===========
# module: seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
# offset: 2
<s>ful for T5 models)."}
)
predict_with_generate: bool = field(
default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to `model.generate`, "
"which is used during evaluation."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
log_interval: Optional[int] = field(
default=40,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
log_model: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
+ save_model_steps: Optional[int] = field(
+ default=3000, # about once every hour in our experiments
+ metadata={
+ "help": "For logging the model more frequently. Used only when `log_model` is set."
+ },
+ )
|
seq2seq.run_seq2seq_flax/CustomFlaxBartModule.setup
|
Modified
|
borisdayma~dalle-mini
|
5aaf9df60f68749c868c7ec228cf3bf93c9a8fa3
|
fix: model config
|
<19>:<del> decoder_config.min_length = OUTPUT_LENGTH
<20>:<del> decoder_config.max_length = OUTPUT_LENGTH
|
# module: seq2seq.run_seq2seq_flax
class CustomFlaxBartModule(FlaxBartModule):
def setup(self):
<0> # we keep shared to easily load pre-trained weights
<1> self.shared = nn.Embed(
<2> self.config.vocab_size,
<3> self.config.d_model,
<4> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<5> dtype=self.dtype,
<6> )
<7> # a separate embedding is used for the decoder
<8> self.decoder_embed = nn.Embed(
<9> OUTPUT_VOCAB_SIZE,
<10> self.config.d_model,
<11> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<12> dtype=self.dtype,
<13> )
<14> self.encoder = FlaxBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
<15>
<16> # the decoder has a different config
<17> decoder_config = BartConfig(self.config.to_dict())
<18> decoder_config.max_position_embeddings = OUTPUT_LENGTH
<19> decoder_config.min_length = OUTPUT_LENGTH
<20> decoder_config.max_length = OUTPUT_LENGTH
<21> decoder_config.vocab_size = OUTPUT_VOCAB_SIZE
<22> self.decoder = FlaxBartDecoder(decoder_config, dtype=self.dtype, embed_tokens=self.decoder_embed)
<23>
|
===========unchanged ref 0===========
at: seq2seq.run_seq2seq_flax
OUTPUT_VOCAB_SIZE = 16384 + 1 # encoded image token space + 1 for bos
OUTPUT_LENGTH = 256 + 1 # number of encoded tokens + 1 for bos
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
to_dict() -> Dict[str, Any]
at: transformers.models.bart.configuration_bart
BartConfig(vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartModule
config: BartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
|
seq2seq.run_seq2seq_flax/CustomFlaxBartModule.setup
|
Modified
|
borisdayma~dalle-mini
|
28f08be4e2cc29df5235c9053fdfd3a4b78f0acb
|
Merge branch 'add-tokenizer-save' into feat-model
|
<0>:<add> # check config is valid, otherwise set default values
<add> self.config.vocab_size_output = getattr(self.config, 'vocab_size_output', OUTPUT_VOCAB_SIZE)
<add> self.config.max_position_embeddings_decoder = getattr(self.config, 'vocab_size_output', OUTPUT_LENGTH)
<add>
<9>:<add> self.config.vocab_size_output,
<del> OUTPUT_VOCAB_SIZE,
<18>:<add> decoder_config.max_position_embeddings = self.config.max_position_embeddings_decoder
<add> decoder_config.vocab_size = self.config.vocab_size_output
<del> decoder_config.max_position_embeddings = OUTPUT_LENGTH
<19>:<del> decoder_config.vocab_size = OUTPUT_VOCAB_SIZE
|
# module: seq2seq.run_seq2seq_flax
class CustomFlaxBartModule(FlaxBartModule):
def setup(self):
<0> # we keep shared to easily load pre-trained weights
<1> self.shared = nn.Embed(
<2> self.config.vocab_size,
<3> self.config.d_model,
<4> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<5> dtype=self.dtype,
<6> )
<7> # a separate embedding is used for the decoder
<8> self.decoder_embed = nn.Embed(
<9> OUTPUT_VOCAB_SIZE,
<10> self.config.d_model,
<11> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<12> dtype=self.dtype,
<13> )
<14> self.encoder = FlaxBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
<15>
<16> # the decoder has a different config
<17> decoder_config = BartConfig(self.config.to_dict())
<18> decoder_config.max_position_embeddings = OUTPUT_LENGTH
<19> decoder_config.vocab_size = OUTPUT_VOCAB_SIZE
<20> self.decoder = FlaxBartDecoder(decoder_config, dtype=self.dtype, embed_tokens=self.decoder_embed)
<21>
|
===========unchanged ref 0===========
at: seq2seq.run_seq2seq_flax
OUTPUT_VOCAB_SIZE = 16384 + 1 # encoded image token space + 1 for bos
OUTPUT_LENGTH = 256 + 1 # number of encoded tokens + 1 for bos
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.vocab_size = vocab_size
self.d_model = d_model
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartModule
config: BartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
|
seq2seq.run_seq2seq_flax/CustomFlaxBartForConditionalGenerationModule.setup
|
Modified
|
borisdayma~dalle-mini
|
28f08be4e2cc29df5235c9053fdfd3a4b78f0acb
|
Merge branch 'add-tokenizer-save' into feat-model
|
<0>:<add> # check config is valid, otherwise set default values
<add> self.config.vocab_size_output = getattr(self.config, 'vocab_size_output', OUTPUT_VOCAB_SIZE)
<add>
<2>:<add> self.config.vocab_size_output,
<del> OUTPUT_VOCAB_SIZE,
<7>:<add> self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.config.vocab_size_output))
<del> self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, OUTPUT_VOCAB_SIZE))
|
# module: seq2seq.run_seq2seq_flax
class CustomFlaxBartForConditionalGenerationModule(FlaxBartForConditionalGenerationModule):
def setup(self):
<0> self.model = CustomFlaxBartModule(config=self.config, dtype=self.dtype)
<1> self.lm_head = nn.Dense(
<2> OUTPUT_VOCAB_SIZE,
<3> use_bias=False,
<4> dtype=self.dtype,
<5> kernel_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<6> )
<7> self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, OUTPUT_VOCAB_SIZE))
<8>
|
===========unchanged ref 0===========
at: seq2seq.run_seq2seq_flax
OUTPUT_VOCAB_SIZE = 16384 + 1 # encoded image token space + 1 for bos
at: seq2seq.run_seq2seq_flax.CustomFlaxBartModule.setup
self.decoder_embed = nn.Embed(
self.config.vocab_size_output,
self.config.d_model,
embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
dtype=self.dtype,
)
decoder_config = BartConfig(self.config.to_dict())
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.vocab_size = vocab_size
at: transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule
config: BartConfig
dtype: jnp.dtype = jnp.float32
bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
setup(self)
at: transformers.models.bart.modeling_flax_bart.FlaxBartModule
config: BartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
===========changed ref 0===========
# module: seq2seq.run_seq2seq_flax
class CustomFlaxBartModule(FlaxBartModule):
def setup(self):
+ # check config is valid, otherwise set default values
+ self.config.vocab_size_output = getattr(self.config, 'vocab_size_output', OUTPUT_VOCAB_SIZE)
+ self.config.max_position_embeddings_decoder = getattr(self.config, 'vocab_size_output', OUTPUT_LENGTH)
+
# we keep shared to easily load pre-trained weights
self.shared = nn.Embed(
self.config.vocab_size,
self.config.d_model,
embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
dtype=self.dtype,
)
# a separate embedding is used for the decoder
self.decoder_embed = nn.Embed(
+ self.config.vocab_size_output,
- OUTPUT_VOCAB_SIZE,
self.config.d_model,
embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
dtype=self.dtype,
)
self.encoder = FlaxBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
# the decoder has a different config
decoder_config = BartConfig(self.config.to_dict())
+ decoder_config.max_position_embeddings = self.config.max_position_embeddings_decoder
+ decoder_config.vocab_size = self.config.vocab_size_output
- decoder_config.max_position_embeddings = OUTPUT_LENGTH
- decoder_config.vocab_size = OUTPUT_VOCAB_SIZE
self.decoder = FlaxBartDecoder(decoder_config, dtype=self.dtype, embed_tokens=self.decoder_embed)
|
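Both setup overrides above hinge on reading optional attributes off a pretrained config with module-level fallbacks. A minimal sketch of that getattr-defaulting pattern, independent of Flax (all names below are illustrative, not the project's API):

OUTPUT_VOCAB_SIZE = 16384 + 1

class Cfg:
    vocab_size = 50265  # always present on a pretrained BART config

cfg = Cfg()
# Older checkpoints lack the new attribute, so fall back to the module default.
cfg.vocab_size_output = getattr(cfg, "vocab_size_output", OUTPUT_VOCAB_SIZE)
assert cfg.vocab_size_output == 16385  # default applied because the attribute was missing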
seq2seq.run_seq2seq_flax/CustomFlaxBartModule.setup
|
Modified
|
borisdayma~dalle-mini
|
a173dadc593642e6805ca2ddf8035fb1d2a1dbc5
|
fix: typo
|
<2>:<add> self.config.max_position_embeddings_decoder = getattr(self.config, 'max_position_embeddings_decoder', OUTPUT_LENGTH)
<del> self.config.max_position_embeddings_decoder = getattr(self.config, 'vocab_size_output', OUTPUT_LENGTH)
|
# module: seq2seq.run_seq2seq_flax
class CustomFlaxBartModule(FlaxBartModule):
def setup(self):
<0> # check config is valid, otherwise set default values
<1> self.config.vocab_size_output = getattr(self.config, 'vocab_size_output', OUTPUT_VOCAB_SIZE)
<2> self.config.max_position_embeddings_decoder = getattr(self.config, 'vocab_size_output', OUTPUT_LENGTH)
<3>
<4> # we keep shared to easily load pre-trained weights
<5> self.shared = nn.Embed(
<6> self.config.vocab_size,
<7> self.config.d_model,
<8> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<9> dtype=self.dtype,
<10> )
<11> # a separate embedding is used for the decoder
<12> self.decoder_embed = nn.Embed(
<13> self.config.vocab_size_output,
<14> self.config.d_model,
<15> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<16> dtype=self.dtype,
<17> )
<18> self.encoder = FlaxBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
<19>
<20> # the decoder has a different config
<21> decoder_config = BartConfig(self.config.to_dict())
<22> decoder_config.max_position_embeddings = self.config.max_position_embeddings_decoder
<23> decoder_config.vocab_size = self.config.vocab_size_output
<24> self.decoder = FlaxBartDecoder(decoder_config, dtype=self.dtype, embed_tokens=self.decoder_embed)
<25>
|
===========unchanged ref 0===========
at: seq2seq.run_seq2seq_flax
OUTPUT_VOCAB_SIZE = 16384 + 1 # encoded image token space + 1 for bos
OUTPUT_LENGTH = 256 + 1 # number of encoded tokens + 1 for bos
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
to_dict() -> Dict[str, Any]
at: transformers.models.bart.configuration_bart
BartConfig(vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartModule
config: BartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
|
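Why the typo mattered: getattr with the wrong key silently returns the wrong attribute once that attribute exists, so the decoder would get 16385 position embeddings instead of 257. A minimal reproduction (illustrative class, not the real config):

class Cfg:
    pass

cfg = Cfg()
cfg.vocab_size_output = 16385
# buggy line: looks up 'vocab_size_output' but assigns max_position_embeddings_decoder
cfg.max_position_embeddings_decoder = getattr(cfg, "vocab_size_output", 257)
assert cfg.max_position_embeddings_decoder == 16385  # should have been 257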
app.ui_gradio/run_inference
|
Modified
|
borisdayma~dalle-mini
|
8944cc5179f4ffedf249fa05a799d60acf402225
|
Attempts to tweak the UI. Brief description of the process. Link to report.
|
<2>:<add> output_title = f"""
<add> <p style="font-size:22px; font-style:bold">Best predictions</p>
<add> <p>We asked our model to generate 32 candidates for your prompt:</p>
<add>
<add> <pre>
<add>
<add> <b>{prompt}</b>
<add> </pre>
<add> <p>We then used a pre-trained CLIP model to score them according to the
<add> similarity of their text and image representations.</p>
<add>
<add> <p>This is the result:</p>
<add> """
<add> output_description = """
<add> <p>Read more about the process <a href="https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA">in our report</a>.<p>
<add> <p style='text-align: center'>Created with <a href="https://github.com/borisdayma/dalle-mini">DALLE·mini</
|
# module: app.ui_gradio
def run_inference(prompt, num_images=32, num_preds=8):
<0> images = top_k_predictions(prompt, num_candidates=num_images, k=num_preds)
<1> predictions = compose_predictions(images)
<2> output_title = 'This would be an html string to serve as title for the outputs.'
<3> output_description = 'This is another random piece of html'
<4> return (output_title, predictions, output_description)
<5>
|
===========unchanged ref 0===========
at: app.ui_gradio
compose_predictions(images, caption=None)
top_k_predictions(prompt, num_candidates=32, k=8)
|
app.ui_gradio/run_inference
|
Modified
|
borisdayma~dalle-mini
|
85eab146f13dd6dd6a6973129c455f668443e125
|
Integrate current UI in demo app.
|
<10>:<add> <p>We then used a pre-trained <a href="https://huggingface.co/openai/clip-vit-base-patch32">CLIP model</a> to score them according to the
<del> <p>We then used a pre-trained CLIP model to score them according to the
<11>:<add> similarity of the text and the image representations.</p>
<del> similarity of their text and image representations.</p>
|
# module: app.ui_gradio
def run_inference(prompt, num_images=32, num_preds=8):
<0> images = top_k_predictions(prompt, num_candidates=num_images, k=num_preds)
<1> predictions = compose_predictions(images)
<2> output_title = f"""
<3> <p style="font-size:22px; font-style:bold">Best predictions</p>
<4> <p>We asked our model to generate 32 candidates for your prompt:</p>
<5>
<6> <pre>
<7>
<8> <b>{prompt}</b>
<9> </pre>
<10> <p>We then used a pre-trained CLIP model to score them according to the
<11> similarity of their text and image representations.</p>
<12>
<13> <p>This is the result:</p>
<14> """
<15> output_description = """
<16> <p>Read more about the process <a href="https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA">in our report</a>.<p>
<17> <p style='text-align: center'>Created with <a href="https://github.com/borisdayma/dalle-mini">DALLE·mini</a></p>
<18> """
<19> return (output_title, predictions, output_description)
<20>
|
===========unchanged ref 0===========
at: app.ui_gradio
compose_predictions(images, caption=None)
top_k_predictions(prompt, num_candidates=32, k=8)
|
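The HTML above describes scoring candidates with a pre-trained CLIP model. The repo's clip_top_k is defined elsewhere; a plausible sketch of such a reranker with the linked openai/clip-vit-base-patch32 checkpoint (assumptions: Flax weights, a single prompt, PIL images in; not the repo's actual implementation):

import numpy as np
from transformers import CLIPProcessor, FlaxCLIPModel

model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def clip_top_k(prompt, images, k=8):
    inputs = processor(text=[prompt], images=images, return_tensors="np", padding=True)
    logits = model(**inputs).logits_per_image  # (num_images, 1) image-text similarity
    order = np.asarray(logits).squeeze(-1).argsort()[::-1]
    return [images[int(i)] for i in order[:k]]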
app.app_gradio/run_inference
|
Modified
|
borisdayma~dalle-mini
|
85eab146f13dd6dd6a6973129c455f668443e125
|
Integrate current UI in demo app.
|
<0>:<add> images = top_k_predictions(prompt, num_candidates=num_images, k=num_preds)
<add> predictions = compose_predictions(images)
<add> output_title = f"""
<add> <p style="font-size:22px; font-style:bold">Best predictions</p>
<add> <p>We asked our model to generate 32 candidates for your prompt:</p>
<del> images = hallucinate(prompt, num_images=num_images)
<1>:<del> images = clip_top_k(prompt, images, k=num_preds)
<2>:<del> predictions_strip = captioned_strip(images, None)
<3>:<del> return predictions_strip
|
# module: app.app_gradio
+ def run_inference(prompt, num_images=32, num_preds=8):
- def run_inference(prompt, num_images=64, num_preds=8):
<0> images = hallucinate(prompt, num_images=num_images)
<1> images = clip_top_k(prompt, images, k=num_preds)
<2> predictions_strip = captioned_strip(images, None)
<3> return predictions_strip
<4>
|
===========unchanged ref 0===========
at: app.app_gradio
hallucinate(prompt, num_images=64)
clip_top_k(prompt, images, k=8)
===========changed ref 0===========
# module: app.app_gradio
+ def top_k_predictions(prompt, num_candidates=32, k=8):
+ images = hallucinate(prompt, num_images=num_candidates)
+ images = clip_top_k(prompt, images, k=k)
+ return images
+
===========changed ref 1===========
# module: app.app_gradio
- def captioned_strip(images, caption):
- increased_h = 0 if caption is None else 48
- w, h = images[0].size[0], images[0].size[1]
- img = Image.new("RGB", (len(images)*w, h + increased_h))
- for i, img_ in enumerate(images):
- img.paste(img_, (i*w, increased_h))
-
- if caption is not None:
- draw = ImageDraw.Draw(img)
- font = ImageFont.truetype("/usr/share/fonts/truetype/liberation2/LiberationMono-Bold.ttf", 40)
- draw.text((20, 3), caption, (255,255,255), font=font)
- return img
-
===========changed ref 2===========
# module: app.ui_gradio
def run_inference(prompt, num_images=32, num_preds=8):
images = top_k_predictions(prompt, num_candidates=num_images, k=num_preds)
predictions = compose_predictions(images)
output_title = f"""
<p style="font-size:22px; font-style:bold">Best predictions</p>
<p>We asked our model to generate 32 candidates for your prompt:</p>
<pre>
<b>{prompt}</b>
</pre>
+ <p>We then used a pre-trained <a href="https://huggingface.co/openai/clip-vit-base-patch32">CLIP model</a> to score them according to the
- <p>We then used a pre-trained CLIP model to score them according to the
+ similarity of the text and the image representations.</p>
- similarity of their text and image representations.</p>
<p>This is the result:</p>
"""
output_description = """
<p>Read more about the process <a href="https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA">in our report</a>.<p>
<p style='text-align: center'>Created with <a href="https://github.com/borisdayma/dalle-mini">DALLE·mini</a></p>
"""
return (output_title, predictions, output_description)
|
app.app_gradio_ngrok/run_inference
|
Modified
|
borisdayma~dalle-mini
|
a61d80f84a701734d9a4504a52def22b888409e6
|
Fix the number of candidates reported. Remove unused function.
|
<5>:<add> <p>We asked our model to generate 128 candidates for your prompt:</p>
<del> <p>We asked our model to generate 32 candidates for your prompt:</p>
|
# module: app.app_gradio_ngrok
def run_inference(prompt):
<0> try:
<1> images = get_images_from_ngrok(prompt)
<2> predictions = compose_predictions(images)
<3> output_title = f"""
<4> <p style="font-size:22px; font-style:bold">Best predictions</p>
<5> <p>We asked our model to generate 32 candidates for your prompt:</p>
<6>
<7> <pre>
<8>
<9> <b>{prompt}</b>
<10> </pre>
<11> <p>We then used a pre-trained <a href="https://huggingface.co/openai/clip-vit-base-patch32">CLIP model</a> to score them according to the
<12> similarity of the text and the image representations.</p>
<13>
<14> <p>This is the result:</p>
<15> """
<16>
<17> output_description = """
<18> <p>Read more about the process <a href="https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA">in our report</a>.<p>
<19> <p style='text-align: center'>Created with <a href="https://github.com/borisdayma/dalle-mini">DALLE·mini</a></p>
<20> """
<21>
<22> except ServiceError:
<23> output_title = f"""
<24> Sorry, there was an error retrieving the images. Please, try again later or <a href="mailto:[email protected]">contact us here</a>.
<25> """
<26> predictions = None
<27> output_description = ""
<28>
<29> return (output_title, predictions, output_description)
<30>
|
===========unchanged ref 0===========
at: app.app_gradio_ngrok
ServiceError(status_code)
===========changed ref 0===========
# module: app.app_gradio_ngrok
- def top_k_predictions(prompt, num_candidates=32, k=8):
- images = hallucinate(prompt, num_images=num_candidates)
- images = clip_top_k(prompt, images, k=k)
- return images
-
|
app.app_gradio_ngrok/get_images_from_ngrok
|
Modified
|
borisdayma~dalle-mini
|
6be3159209e099be568e768ba6a70c309746984e
|
Get backend url from environment variable.
|
<1>:<add> backend_url,
<del> "https://dd7123a7e01c.ngrok.io/generate",
|
# module: app.app_gradio_ngrok
def get_images_from_ngrok(prompt):
<0> r = requests.post(
<1> "https://dd7123a7e01c.ngrok.io/generate",
<2> json={"prompt": prompt}
<3> )
<4> if r.status_code == 200:
<5> images = r.json()["images"]
<6> images = [Image.open(BytesIO(base64.b64decode(img))) for img in images]
<7> return images
<8> else:
<9> raise ServiceError(r.status_code)
<10>
|
===========unchanged ref 0===========
at: PIL.Image
open(fp, mode="r", formats=None)
at: app.app_gradio_ngrok
backend_url = os.environ["BACKEND_SERVER"]
at: base64
b64decode(s: _decodable, altchars: Optional[bytes]=..., validate: bool=...) -> bytes
at: io
BytesIO(initial_bytes: bytes=...)
at: requests.api
post(url: Union[Text, bytes], data: _Data=..., json=..., **kwargs) -> Response
at: requests.models.Response
__attrs__ = [
"_content",
"status_code",
"headers",
"url",
"history",
"encoding",
"reason",
"cookies",
"elapsed",
"request",
]
json(**kwargs) -> Any
at: requests.models.Response.__init__
self.status_code = None
|
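get_images_from_ngrok decodes base64 PNGs out of a JSON payload; the serving side would do the inverse. A hypothetical counterpart (not shown anywhere in this excerpt, names are assumptions):

import base64
from io import BytesIO

def encode_images(images):
    # PIL images -> base64 strings, mirroring the client-side b64decode above
    payload = []
    for img in images:
        buf = BytesIO()
        img.save(buf, format="PNG")
        payload.append(base64.b64encode(buf.getvalue()).decode("utf-8"))
    return {"images": payload}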
app.app_gradio_ngrok/run_inference
|
Modified
|
borisdayma~dalle-mini
|
3584703fd52d962ac872dd93408275ee475a44fa
|
Text modifications, make links unconditionally blue.
|
<18>:<add> <p>Read our <a style="color:blue;" href="https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA">full report</a> for more details on how this works.<p>
<del> <p>Read more about the process <a href="https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA">in our report</a>.<p>
<19>:<add> <p style='text-align: center'>Created with <a style="color:blue;" href="https://github.com/borisdayma/dalle-mini">DALL·E mini</a></p>
<del> <p style='text-align: center'>Created with <a href="https://github.com/borisdayma/dalle-mini">DALLE·mini</a></p>
|
# module: app.app_gradio_ngrok
def run_inference(prompt):
<0> try:
<1> images = get_images_from_ngrok(prompt)
<2> predictions = compose_predictions(images)
<3> output_title = f"""
<4> <p style="font-size:22px; font-style:bold">Best predictions</p>
<5> <p>We asked our model to generate 128 candidates for your prompt:</p>
<6>
<7> <pre>
<8>
<9> <b>{prompt}</b>
<10> </pre>
<11> <p>We then used a pre-trained <a href="https://huggingface.co/openai/clip-vit-base-patch32">CLIP model</a> to score them according to the
<12> similarity of the text and the image representations.</p>
<13>
<14> <p>This is the result:</p>
<15> """
<16>
<17> output_description = """
<18> <p>Read more about the process <a href="https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA">in our report</a>.<p>
<19> <p style='text-align: center'>Created with <a href="https://github.com/borisdayma/dalle-mini">DALLE·mini</a></p>
<20> """
<21>
<22> except ServiceError:
<23> output_title = f"""
<24> Sorry, there was an error retrieving the images. Please, try again later or <a href="mailto:[email protected]">contact us here</a>.
<25> """
<26> predictions = None
<27> output_description = ""
<28>
<29> return (output_title, predictions, output_description)
<30>
|
===========unchanged ref 0===========
at: app.app_gradio_ngrok
compose_predictions(images, caption=None)
ServiceError(status_code)
get_images_from_ngrok(prompt)
|
dalle_mini.helpers/captioned_strip
|
Modified
|
borisdayma~dalle-mini
|
0dd7d80761594fdb97effa05969d8175d1d20cf6
|
feat: allow display in multi lines
|
<2>:<add> img = Image.new("RGB", (len(images)*w//rows, h*rows + increased_h))
<del> img = Image.new("RGB", (len(images)*w, h + increased_h))
<4>:<add> img.paste(img_, (i//rows*w, increased_h + (i % rows) * h))
<del> img.paste(img_, (i*w, increased_h))
|
# module: dalle_mini.helpers
+ def captioned_strip(images, caption=None, rows=1):
- def captioned_strip(images, caption):
<0> increased_h = 0 if caption is None else 48
<1> w, h = images[0].size[0], images[0].size[1]
<2> img = Image.new("RGB", (len(images)*w, h + increased_h))
<3> for i, img_ in enumerate(images):
<4> img.paste(img_, (i*w, increased_h))
<5>
<6> if caption is not None:
<7> draw = ImageDraw.Draw(img)
<8> font = ImageFont.truetype("/usr/share/fonts/truetype/liberation2/LiberationMono-Bold.ttf", 40)
<9> draw.text((20, 3), caption, (255,255,255), font=font)
<10> return img
<11>
|
===========unchanged ref 0===========
at: PIL.Image
new(mode, size, color=0)
at: PIL.Image.Image
format = None
format_description = None
_close_exclusive_fp_after_loading = True
__copy__ = copy
paste(im, box=None, mask=None)
at: PIL.ImageDraw
Draw(im, mode=None)
at: PIL.ImageDraw.ImageDraw
font = None
text(xy, text, fill=None, font=None, anchor=None, spacing=4, align="left", direction=None, features=None, language=None, stroke_width=0, stroke_fill=None, embedded_color=False, *args, **kwargs)
at: PIL.ImageFont
truetype(font=None, size=10, index=0, encoding="", layout_engine=None)
|
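A worked check of the rows arithmetic added to captioned_strip: 8 images of 256x256 with rows=2 and no caption yield a 1024x512 canvas, filled column by column.

w = h = 256
rows, n = 2, 8
canvas = (n * w // rows, h * rows)  # (1024, 512)
coords = [(i // rows * w, (i % rows) * h) for i in range(n)]
# [(0, 0), (0, 256), (256, 0), (256, 256), (512, 0), (512, 256), (768, 0), (768, 256)]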
app.gradio.app_gradio_ngrok/run_inference
|
Modified
|
borisdayma~dalle-mini
|
a0b5dc71cf8139472b858c9856d37a68b1d76546
|
feat: update gradio app
|
<2>:<add> predictions = captioned_strip(images)
<del> predictions = compose_predictions(images)
|
# module: app.gradio.app_gradio_ngrok
def run_inference(prompt):
<0> try:
<1> images = get_images_from_ngrok(prompt)
<2> predictions = compose_predictions(images)
<3> output_title = f"""
<4> <p style="font-size:22px; font-style:bold">Best predictions</p>
<5> <p>We asked our model to generate 128 candidates for your prompt:</p>
<6>
<7> <pre>
<8>
<9> <b>{prompt}</b>
<10> </pre>
<11> <p>We then used a pre-trained <a href="https://huggingface.co/openai/clip-vit-base-patch32">CLIP model</a> to score them according to the
<12> similarity of the text and the image representations.</p>
<13>
<14> <p>This is the result:</p>
<15> """
<16>
<17> output_description = """
<18> <p>Read our <a style="color:blue;" href="https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA">full report</a> for more details on how this works.<p>
<19> <p style='text-align: center'>Created with <a style="color:blue;" href="https://github.com/borisdayma/dalle-mini">DALL·E mini</a></p>
<20> """
<21>
<22> except ServiceError:
<23> output_title = f"""
<24> Sorry, there was an error retrieving the images. Please, try again later or <a href="mailto:[email protected]">contact us here</a>.
<25> """
<26> predictions = None
<27> output_description = ""
<28>
<29> return (output_title, predictions, output_description)
<30>
|
===========unchanged ref 0===========
at: app.gradio.app_gradio_ngrok
ServiceError(status_code)
===========changed ref 0===========
# module: app.gradio.app_gradio_ngrok
- def compose_predictions(images, caption=None):
- increased_h = 0 if caption is None else 48
- w, h = images[0].size[0], images[0].size[1]
- img = Image.new("RGB", (len(images)*w, h + increased_h))
- for i, img_ in enumerate(images):
- img.paste(img_, (i*w, increased_h))
-
- if caption is not None:
- draw = ImageDraw.Draw(img)
- font = ImageFont.truetype("/usr/share/fonts/truetype/liberation2/LiberationMono-Bold.ttf", 40)
- draw.text((20, 3), caption, (255,255,255), font=font)
- return img
-
|
app.gradio.app_gradio/run_inference
|
Modified
|
borisdayma~dalle-mini
|
a0b5dc71cf8139472b858c9856d37a68b1d76546
|
feat: update gradio app
|
<1>:<add> predictions = captioned_strip(images)
<del> predictions = compose_predictions(images)
|
# module: app.gradio.app_gradio
def run_inference(prompt, num_images=32, num_preds=8):
<0> images = top_k_predictions(prompt, num_candidates=num_images, k=num_preds)
<1> predictions = compose_predictions(images)
<2> output_title = f"""
<3> <b>{prompt}</b>
<4> """
<5> return (output_title, predictions)
<6>
|
===========unchanged ref 0===========
at: app.gradio.app_gradio
outputs = [
gr.outputs.HTML(label=""), # To be used as title
gr.outputs.Image(label=''),
]
description = """
DALL·E-mini is an AI model that generates images from any prompt you give! Generate images from text:
"""
===========changed ref 0===========
# module: app.gradio.app_gradio
-
- def stack_reconstructions(images):
- w, h = images[0].size[0], images[0].size[1]
- img = Image.new("RGB", (len(images)*w, h))
- for i, img_ in enumerate(images):
- img.paste(img_, (i*w,0))
- return img
-
===========changed ref 1===========
# module: app.gradio.app_gradio
- def plot_images(images):
- fig = plt.figure(figsize=(40, 20))
- columns = 4
- rows = 2
- plt.subplots_adjust(hspace=0, wspace=0)
-
- for i in range(1, columns*rows +1):
- fig.add_subplot(rows, columns, i)
- plt.imshow(images[i-1])
- plt.gca().axes.get_yaxis().set_visible(False)
- plt.show()
-
===========changed ref 2===========
# module: app.gradio.app_gradio_ngrok
- def compose_predictions(images, caption=None):
- increased_h = 0 if caption is None else 48
- w, h = images[0].size[0], images[0].size[1]
- img = Image.new("RGB", (len(images)*w, h + increased_h))
- for i, img_ in enumerate(images):
- img.paste(img_, (i*w, increased_h))
-
- if caption is not None:
- draw = ImageDraw.Draw(img)
- font = ImageFont.truetype("/usr/share/fonts/truetype/liberation2/LiberationMono-Bold.ttf", 40)
- draw.text((20, 3), caption, (255,255,255), font=font)
- return img
-
===========changed ref 3===========
# module: app.gradio.app_gradio_ngrok
def run_inference(prompt):
try:
images = get_images_from_ngrok(prompt)
+ predictions = captioned_strip(images)
- predictions = compose_predictions(images)
output_title = f"""
<p style="font-size:22px; font-style:bold">Best predictions</p>
<p>We asked our model to generate 128 candidates for your prompt:</p>
<pre>
<b>{prompt}</b>
</pre>
<p>We then used a pre-trained <a href="https://huggingface.co/openai/clip-vit-base-patch32">CLIP model</a> to score them according to the
similarity of the text and the image representations.</p>
<p>This is the result:</p>
"""
output_description = """
<p>Read our <a style="color:blue;" href="https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA">full report</a> for more details on how this works.<p>
<p style='text-align: center'>Created with <a style="color:blue;" href="https://github.com/borisdayma/dalle-mini">DALL·E mini</a></p>
"""
except ServiceError:
output_title = f"""
Sorry, there was an error retrieving the images. Please, try again later or <a href="mailto:[email protected]">contact us here</a>.
"""
predictions = None
output_description = ""
return (output_title, predictions, output_description)
|
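The unchanged refs above show the outputs list these apps hand to gradio. A minimal wiring sketch in the old-style gr.inputs/gr.outputs API the repo uses (signatures vary across gradio versions; labels here are illustrative):

import gradio as gr

gr.Interface(
    fn=run_inference,
    inputs=gr.inputs.Textbox(label="What do you want to see?"),
    outputs=[
        gr.outputs.HTML(label=""),  # title
        gr.outputs.Image(label=""),
    ],
).launch()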
dalle_mini.text/TextNormalizer.__call__
|
Modified
|
borisdayma~dalle-mini
|
a09ea254f7849972e966630d5a89a30258fa775b
|
feat: add ftfy
|
<0>:<add> # fix some characters
<add> t = ftfy.fix_text(t)
|
# module: dalle_mini.text
class TextNormalizer:
def __call__(self, t, clip=False):
<0> # fix html
<1> t = fix_html(t)
<2> if not clip:
<3> # decode and simplify text: see unidecode library
<4> t = unidecode(t)
<5> # lower case
<6> t = t.lower()
<7> # replace <PERSON> (for CC12M)
<8> t = replace_person_token(t)
<9> # remove wiki reference (for WIT)
<10> t = remove_wiki_ref(t)
<11> # remove html tags
<12> t = remove_html_tags(t)
<13> # remove urls
<14> t = remove_urls(t)
<15> # remove commas in numbers
<16> t = remove_comma_numbers(t)
<17> if not clip:
<18> # handle dots in numbers and quotes - Part 1
<19> t = pre_process_dot_numbers(t)
<20> t = pre_process_quotes(t)
<21> # handle special characters
<22> t = handle_special_chars(t)
<23> # handle hashtags
<24> t = expand_hashtags(t, self._hashtag_processor)
<25> # ignore useless characters
<26> t = ignore_chars(t)
<27> # simplify quotes
<28> t = simplify_quotes(t)
<29> # all punctuation becomes commas
<30> t = replace_punctuation_with_commas(t)
<31> # handle dots in numbers and quotes - Part 2
<32> t = post_process_dot_numbers(t)
<33> t = post_process_quotes(t)
<34> # handle repeating characters
<35> t = remove_repeating_chars(t)
<36> # merge commas
<37> t = merge_commas(t)
<38> # merge quotes
<39> t = merge_quotes(t)
<40> # remove multiple spaces
<41> t = remove_extra_spaces(t)
<42> # remove first and last comma
<43> t = remove_first_last_commas(t)
<44> # always start with</s>
|
===========below chunk 0===========
# module: dalle_mini.text
class TextNormalizer:
def __call__(self, t, clip=False):
# offset: 1
return f" {t}" if not clip else t
===========unchanged ref 0===========
at: dalle_mini.text
replace_person_token(t)
fix_html(t)
replace_punctuation_with_commas(t)
simplify_quotes(t)
merge_quotes(t)
remove_comma_numbers(t)
pre_process_dot_numbers(t)
post_process_dot_numbers(t)
pre_process_quotes(t)
post_process_quotes(t)
merge_commas(t)
handle_special_chars(t)
expand_hashtags(t, hashtag_processor)
ignore_chars(t)
remove_extra_spaces(t)
remove_repeating_chars(t)
remove_urls(t)
remove_html_tags(t)
remove_wiki_ref(t)
at: dalle_mini.text.TextNormalizer.__init__
self._hashtag_processor = HashtagProcessor()
|
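What the added ftfy step buys before the rest of the pipeline runs, on the classic UTF-8-decoded-as-Latin-1 case:

import ftfy
assert ftfy.fix_text("schÃ¶n") == "schön"  # mojibake repaired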
dev.seq2seq.run_seq2seq_flax/DataTrainingArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
a96f44dfc4393816bc2e43f2c3df79fff46d8214
|
feat: handle streaming
|
<0>:<add> if (
<add> self.dataset_name is None
<add> and self.train_file is None
<add> and self.validation_file is None
<add> ):
<add> raise ValueError(
<del> if self.dataset_name is None and self.train_file is None and self.validation_file is None:
<1>:<add> "Need either a dataset name or a training/validation file."
<del> raise ValueError("Need either a dataset name or a training/validation file.")
<2>:<add> )
<5>:<add> assert extension in [
<add> "tsv",
<add> "csv",
<add> "json",
<add> "jsonl",
<add> ], "`train_file` should be a tsv, csv or json file."
<del> assert extension in ["tsv", "csv", "json"], "`train_file` should be a tsv, csv or json file."
<8>:<add> assert extension in [
<add> "tsv",
<add> "csv",
<add> "json",
<add> "jsonl",
<add> ], "`validation_file` should be a tsv, csv or json file."
<del> assert extension in ["tsv", "csv", "json"], "`validation_file` should be a tsv, csv or json file."
|
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
def __post_init__(self):
<0> if self.dataset_name is None and self.train_file is None and self.validation_file is None:
<1> raise ValueError("Need either a dataset name or a training/validation file.")
<2> else:
<3> if self.train_file is not None:
<4> extension = self.train_file.split(".")[-1]
<5> assert extension in ["tsv", "csv", "json"], "`train_file` should be a tsv, csv or json file."
<6> if self.validation_file is not None:
<7> extension = self.validation_file.split(".")[-1]
<8> assert extension in ["tsv", "csv", "json"], "`validation_file` should be a tsv, csv or json file."
<9> if self.val_max_target_length is None:
<10> self.val_max_target_length = self.max_target_length
<11>
|
===========unchanged ref 0===========
at: dataclasses
field(*, default_factory: Callable[[], _T], init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> _T
field(*, init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> Any
field(*, default: _T, init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> _T
at: dev.seq2seq.run_seq2seq_flax.DataTrainingArguments
dataset_name: Optional[str] = field(
default=None,
metadata={"help": "The name of the dataset to use (via the datasets library)."},
)
dataset_config_name: Optional[str] = field(
default=None,
metadata={
"help": "The configuration name of the dataset to use (via the datasets library)."
},
)
text_column: Optional[str] = field(
default="caption",
metadata={
"help": "The name of the column in the datasets containing the full texts (for summarization)."
},
)
encoding_column: Optional[str] = field(
default="encoding",
metadata={
"help": "The name of the column in the datasets containing the image encodings."
},
)
dataset_repo_or_path: Optional[str] = field(
default=None,
metadata={"help": "The dataset repository containing encoded files."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
===========unchanged ref 1===========
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."
},
)
streaming: bool = field(
default=False,
metadata={"help": "Whether to stream the dataset."},
)
len_train: Optional[int] = field(
default=None,
metadata={"help": "Length of training dataset, required for streaming"},
)
len_eval: Optional[int] = field(
default=None,
metadata={"help": "Length of validation dataset, required for streaming"},
)
max_source_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
no_decay: bool = field(
default=False,
metadata={"help": "Whether to use decay in the learning rate scheduler."},
)
max_target_length: Optional[int] = field(
default=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the `max_length` param of `model.generate`, which is used "
"during evaluation."
},
)
===========unchanged ref 2===========
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
normalize_text: bool = field(
default=False,
metadata={"help": "Normalize/Simplify text"},
)
preprocessing_num_workers: Optional[int] = field(
default=80, # ensure we have the same datasets cached data and avoid using too much space
metadata={"help": "The number of processes to use for the preprocessing."},
)
source_prefix: Optional[str] = field(
default=None,
metadata={
"help": "A prefix to add before every source text (useful for T5 models)."
},
)
overwrite_cache: bool = field(
default=False,
metadata={"help": "Overwrite the cached training and evaluation sets"},
)
===========changed ref 0===========
# module: dev.seq2seq.run_seq2seq_flax
logger = pylogging.getLogger(__name__)
-
- try:
- nltk.data.find("tokenizers/punkt")
- except (LookupError, OSError):
- if is_offline_mode():
- raise LookupError(
- "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
- )
- with FileLock(".lock") as lock:
- nltk.download("punkt", quiet=True)
MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
# Model hyperparameters, for convenience
# TODO: the model has now it's own definition file and should be imported
OUTPUT_VOCAB_SIZE = 16384 + 1 # encoded image token space + 1 for bos
OUTPUT_LENGTH = 256 + 1 # number of encoded tokens + 1 for bos
BOS_TOKEN_ID = 16384
+ BASE_MODEL = "facebook/bart-large-cnn" # we currently have issues with bart-large
- BASE_MODEL = 'facebook/bart-large-cnn' # we currently have issues with bart-large
|
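What the new streaming/len_train/len_eval fields support: datasets loaded with streaming=True are iterated lazily and expose no len(), so the lengths must be supplied explicitly. A sketch (file name and caption column are illustrative):

from datasets import load_dataset

ds = load_dataset("json", data_files={"train": "train.jsonl"}, streaming=True)
for i, example in enumerate(ds["train"]):
    print(example["caption"])  # assumes a caption column, as in this script
    if i == 2:
        break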
dev.seq2seq.run_seq2seq_flax/DataTrainingArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
074c5e12a8163030263447d3c699c0bfd417e566
|
feat: log epoch + check params
|
<0>:<del> if (
<1>:<add> if self.dataset_repo_or_path is None:
<del> self.dataset_name is None
<2>:<del> and self.train_file is None
<3>:<del> and self.validation_file is None
<4>:<del> ):
<5>:<del> raise ValueError(
<6>:<del> "Need either a dataset name or a training/validation file."
<7>:<del> )
<8>:<add> raise ValueError("Need a dataset repository or path.")
<add> if self.train_file is None or self.validation_file is None:
<add> raise ValueError("Need training/validation file.")
<27>:<add> if self.streaming and (self.len_train is None or self.len_eval is None):
<add> raise ValueError(
<add> "Streaming requires providing length of training and validation datasets"
<add> )
|
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
def __post_init__(self):
<0> if (
<1> self.dataset_name is None
<2> and self.train_file is None
<3> and self.validation_file is None
<4> ):
<5> raise ValueError(
<6> "Need either a dataset name or a training/validation file."
<7> )
<8> else:
<9> if self.train_file is not None:
<10> extension = self.train_file.split(".")[-1]
<11> assert extension in [
<12> "tsv",
<13> "csv",
<14> "json",
<15> "jsonl",
<16> ], "`train_file` should be a tsv, csv or json file."
<17> if self.validation_file is not None:
<18> extension = self.validation_file.split(".")[-1]
<19> assert extension in [
<20> "tsv",
<21> "csv",
<22> "json",
<23> "jsonl",
<24> ], "`validation_file` should be a tsv, csv or json file."
<25> if self.val_max_target_length is None:
<26> self.val_max_target_length = self.max_target_length
<27>
|
===========unchanged ref 0===========
at: dev.seq2seq.run_seq2seq_flax.DataTrainingArguments
text_column: Optional[str] = field(
default="caption",
metadata={
"help": "The name of the column in the datasets containing the full texts (for summarization)."
},
)
encoding_column: Optional[str] = field(
default="encoding",
metadata={
"help": "The name of the column in the datasets containing the image encodings."
},
)
dataset_repo_or_path: Optional[str] = field(
default=None,
metadata={"help": "The dataset repository containing encoded files."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."
},
)
streaming: bool = field(
default=False,
metadata={"help": "Whether to stream the dataset."},
)
len_train: Optional[int] = field(
default=None,
metadata={"help": "Length of training dataset, required for streaming"},
)
len_eval: Optional[int] = field(
default=None,
metadata={"help": "Length of validation dataset, required for streaming"},
)
max_source_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
no_decay: bool = field(
default=False,
metadata={"help": "Whether to use decay in the learning rate scheduler."},
)
===========unchanged ref 1===========
max_target_length: Optional[int] = field(
default=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=OUTPUT_LENGTH,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the `max_length` param of `model.generate`, which is used "
"during evaluation."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
normalize_text: bool = field(
default=False,
metadata={"help": "Normalize/Simplify text"},
)
preprocessing_num_workers: Optional[int] = field(
default=80, # ensure we have the same datasets cached data and avoid using too much space
metadata={"help": "The number of processes to use for the preprocessing."},
)
source_prefix: Optional[str] = field(
default=None,
metadata={
"help": "A prefix to add before every source text (useful for T5 models)."
},
)
overwrite_cache: bool = field(
default=False,
metadata={"help": "Overwrite the cached training and evaluation sets"},
)
===========unchanged ref 2===========
log_interval: Optional[int] = field(
default=40,
metadata={"help": "Log frequency for metrics"},
)
log_model: bool = field(
default=False,
metadata={"help": "Overwrite the cached training and evaluation sets"},
)
save_model_steps: Optional[int] = field(
default=5000, # about once every 1.5h in our experiments
metadata={
"help": "For logging the model more frequently. Used only when `log_model` is set."
},
)
===========changed ref 0===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
- dataset_name: Optional[str] = field(
- default=None,
- metadata={"help": "The name of the dataset to use (via the datasets library)."},
- )
- dataset_config_name: Optional[str] = field(
- default=None,
- metadata={
- "help": "The configuration name of the dataset to use (via the datasets library)."
- },
- )
text_column: Optional[str] = field(
default="caption",
metadata={
"help": "The name of the column in the datasets containing the full texts (for summarization)."
},
)
encoding_column: Optional[str] = field(
default="encoding",
metadata={
"help": "The name of the column in the datasets containing the image encodings."
},
)
dataset_repo_or_path: Optional[str] = field(
default=None,
metadata={"help": "The dataset repository containing encoded files."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."
},
)
streaming: bool = field(
default=False,
metadata={"help": "Whether to stream the dataset."},
)
len_train: Optional[int] = field(
default=None,
metadata={"help": "Length of training dataset, required for streaming"},
)
len_eval: Optional[int] = field(
default=None,
metadata={"help": "Length of validation dataset, required for streaming"},
)
max_source</s>
|
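The __post_init__ validation style above in miniature, as a self-contained dataclass (illustrative fields only, not the training script itself):

from dataclasses import dataclass
from typing import Optional

@dataclass
class Args:
    train_file: Optional[str] = None
    streaming: bool = False
    len_train: Optional[int] = None

    def __post_init__(self):
        if self.train_file is not None:
            ext = self.train_file.split(".")[-1]
            assert ext in ("tsv", "csv", "json", "jsonl"), "`train_file` should be a tsv, csv or json file."
        if self.streaming and self.len_train is None:
            raise ValueError("Streaming requires providing length of training dataset")

Args(train_file="data.jsonl", streaming=True, len_train=1_000_000)  # passes validation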
dev.seq2seq.run_seq2seq_flax/wandb_log
|
Modified
|
borisdayma~dalle-mini
|
4a4820f6340327d5317136d63d858b386200f844
|
feat: get rid of global_step + log more metrics
|
<2>:<add> f"{prefix}/{k}" if prefix is not None else k: v for k, v in metrics.items()
<del> f"{prefix}/{k}" if prefix is not None else k: jax.device_get(v)
<3>:<del> for k, v in metrics.items()
<6>:<add> log_metrics["train/step"] = unreplicate(step)
<del> log_metrics["train/step"] = step
|
# module: dev.seq2seq.run_seq2seq_flax
def wandb_log(metrics, step=None, prefix=None):
<0> if jax.process_index() == 0:
<1> log_metrics = {
<2> f"{prefix}/{k}" if prefix is not None else k: jax.device_get(v)
<3> for k, v in metrics.items()
<4> }
<5> if step is not None:
<6> log_metrics["train/step"] = step
<7> wandb.log(log_metrics)
<8>
|
===========unchanged ref 0===========
at: wandb
log = _preinit.PreInitCallable("wandb.log", wandb_sdk.wandb_run.Run.log)
|
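Context for the unreplicate(step) change above: under jax.pmap the step counter lives replicated across devices, and wandb needs a single scalar. A sketch with flax's utilities (run on whatever devices are visible):

import jax.numpy as jnp
from flax.jax_utils import replicate, unreplicate

step = replicate(jnp.array(0))   # adds a leading device axis, shape (num_devices,)
scalar_step = unreplicate(step)  # takes the first copy back off the devices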
dev.seq2seq.run_seq2seq_flax/wandb_log
|
Modified
|
borisdayma~dalle-mini
|
335110d0d1308d3cb19f57d46ded8b5f7f5a40e7
|
Merge pull request #91 from borisdayma/feat-inf
|
<5>:<add> log_metrics["train/step"] = step
<del> log_metrics["train/step"] = unreplicate(step)
|
# module: dev.seq2seq.run_seq2seq_flax
def wandb_log(metrics, step=None, prefix=None):
<0> if jax.process_index() == 0:
<1> log_metrics = {
<2> f"{prefix}/{k}" if prefix is not None else k: v for k, v in metrics.items()
<3> }
<4> if step is not None:
<5> log_metrics["train/step"] = unreplicate(step)
<6> wandb.log(log_metrics)
<7>
|
===========unchanged ref 0===========
at: dev.seq2seq.run_seq2seq_flax.wandb_log
log_metrics = {
f"{prefix}/{k}" if prefix is not None else k: v for k, v in metrics.items()
}
at: wandb
log = _preinit.PreInitCallable("wandb.log", wandb_sdk.wandb_run.Run.log)
===========changed ref 0===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=BASE_MODEL,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={
"help": "If training from scratch, pass a model type from the list: "
+ ", ".join(MODEL_TYPES)
},
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name"
- },
- )
- tokenizer_name: Optional[str] = field(
- default=None,
- metadata={
- "help": "Pretrained tokenizer name or path if not the same as model_name"
},
)
cache_dir: Optional[str] = field(
default=None,
metadata={
"help": "Where do you want to store the pretrained models downloaded from s3"
},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={
"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
)
from_checkpoint: Optional[str] = field(
default=None,
metadata={
"help": "Loads a pretrained wandb checkpoint. Use artifact reference."
},
)
|
dalle_mini.text/fix_html
|
Modified
|
borisdayma~dalle-mini
|
d054d1b9c3e7b1d8a3703633678cb4926c4b0000
|
feat: simplify fix_html
|
<0>:<del> "Adapted from fastai"
<1>:<del> t = (
<2>:<del> t.replace("#39;", "'")
<3>:<del> .replace("&", "&")
<4>:<del> .replace("amp;", "&")
<5>:<del> .replace("#146;", "'")
<6>:<del> .replace("nbsp;", " ")
<7>:<del> .replace("#36;", "$")
<8>:<del> .replace("\\n", "\n")
<9>:<del> .replace("quot;", "'")
<10>:<del> .replace("<br />", "\n")
<11>:<del> .replace('\\"', '"')
<12>:<del> .replace("<unk>", " ")
<13>:<del> .replace(" @.@ ", ".")
<14>:<del> .replace(" @-@ ", "-")
<15>:<del> )
<16>:<add> # from OpenAI CLIP
<add> return html.unescape(html.unescape(t)
<del> return html.unescape(t)
|
# module: dalle_mini.text
def fix_html(t):
<0> "Adapted from fastai"
<1> t = (
<2> t.replace("#39;", "'")
<3> .replace("&", "&")
<4> .replace("amp;", "&")
<5> .replace("#146;", "'")
<6> .replace("nbsp;", " ")
<7> .replace("#36;", "$")
<8> .replace("\\n", "\n")
<9> .replace("quot;", "'")
<10> .replace("<br />", "\n")
<11> .replace('\\"', '"')
<12> .replace("<unk>", " ")
<13> .replace(" @.@ ", ".")
<14> .replace(" @-@ ", "-")
<15> )
<16> return html.unescape(t)
<17>
|
===========unchanged ref 0===========
at: html
unescape(s: AnyStr) -> AnyStr
at: re
sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
|
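Why the simplified fix_html unescapes twice (as the "# from OpenAI CLIP" comment notes): scraped captions are frequently double-escaped.

import html
t = "Tom &amp;amp; Jerry"
assert html.unescape(t) == "Tom &amp; Jerry"              # first pass
assert html.unescape(html.unescape(t)) == "Tom & Jerry"   # second pass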
dalle_mini.text/fix_html
|
Modified
|
borisdayma~dalle-mini
|
41b680bf5adf68411b86c5b48074364e5b809cd5
|
fix: typo
|
<1>:<add> return html.unescape(html.unescape(t))
<del> return html.unescape(html.unescape(t)
|
# module: dalle_mini.text
def fix_html(t):
<0> # from OpenAI CLIP
<1> return html.unescape(html.unescape(t)
<2>
|
===========unchanged ref 0===========
at: html
unescape(s: AnyStr) -> AnyStr
|
dalle_mini.text/HashtagProcessor.__init__
|
Modified
|
borisdayma~dalle-mini
|
a96c3477899abfc27b0ca4b4f56c19cf51bdf4d5
|
feat(text): use hf_hub for wiki word count
|
<0>:<add> wiki_word_frequency = hf_hub_download(
<add> "dalle-mini/dalle-mini", filename="enwiki-words-frequency.txt"
<add> )
<1>:<add> l.split()[0] for l in Path(wiki_word_frequency).read_text().splitlines()
<del> l.split()[0] for l in Path(get_wiki_file()).read_text().splitlines()
|
# module: dalle_mini.text
class HashtagProcessor:
# Adapted from wordninja library
# We use our wikipedia word count + a good heuristic to make it work
def __init__(self):
<0> self._word_cost = (
<1> l.split()[0] for l in Path(get_wiki_file()).read_text().splitlines()
<2> )
<3> self._word_cost = {
<4> str(k): math.log(float(i + 1)) for i, k in enumerate(self._word_cost)
<5> }
<6> self._max_word = max(len(x) for x in self._word_cost.keys())
<7> self._SPLIT_RE = re.compile("[^a-zA-Z0-9']+")
<8>
|
===========unchanged ref 0===========
at: dalle_mini.text.HashtagProcessor.__init__
self._word_cost = {
str(k): math.log(float(i + 1)) for i, k in enumerate(self._word_cost)
}
self._word_cost = (
l.split()[0] for l in Path(wiki_word_frequency).read_text().splitlines()
)
at: re
compile(pattern: AnyStr, flags: _FlagsType=...) -> Pattern[AnyStr]
compile(pattern: Pattern[AnyStr], flags: _FlagsType=...) -> Pattern[AnyStr]
at: typing.Pattern
flags: int
groupindex: Mapping[str, int]
groups: int
pattern: AnyStr
split(string: AnyStr, maxsplit: int=...) -> list[AnyStr]
===========changed ref 0===========
# module: dalle_mini.text
"""
Utilities for processing text.
"""
-
===========changed ref 1===========
# module: dalle_mini.text
- def get_wiki_file():
- if not Path(WIKI_STATS_LOCAL).exists():
- r = requests.get(WIKI_STATS_URL, stream=True)
- with open(WIKI_STATS_LOCAL, "wb") as fd:
- for chunk in r.iter_content(chunk_size=128):
- fd.write(chunk)
- return WIKI_STATS_LOCAL
-
===========changed ref 2===========
# module: dalle_mini.text
- WIKI_STATS_URL = "https://github.com/borisdayma/wikipedia-word-frequency/raw/feat-update/results/enwiki-20210820-words-frequency.txt"
- WIKI_STATS_LOCAL = Path(WIKI_STATS_URL).parts[-1]
# based on wiki word occurence
person_token = [("a person", 282265), ("someone", 121194), ("somebody", 12219)]
temp_token = "xtokx" # avoid repeating chars
|
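HashtagProcessor only shows its __init__ here; per its "Adapted from wordninja" comment, the split itself would minimize summed word costs, with cost = log(rank + 1) from the wiki frequency list. A toy-vocabulary sketch of that kind of dynamic program (not the repo's exact code):

import math

words = ["the", "cat", "sat"]  # ordered by corpus frequency
word_cost = {w: math.log(i + 1) for i, w in enumerate(words)}
max_word = max(len(w) for w in words)

def split(s):
    best = [(0.0, 0)]  # best[i] = (total cost, length of last word) for s[:i]
    for i in range(1, len(s) + 1):
        cands = [
            (best[i - l][0] + word_cost[s[i - l:i]], l)
            for l in range(1, min(i, max_word) + 1)
            if s[i - l:i] in word_cost
        ]
        best.append(min(cands) if cands else (float("inf"), 1))
    out, i = [], len(s)
    while i > 0:
        out.append(s[i - best[i][1]:i])
        i -= best[i][1]
    return " ".join(reversed(out))

assert split("thecatsat") == "the cat sat"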
dalle_mini.text/expand_hashtags
|
Modified
|
borisdayma~dalle-mini
|
849c5f392614cb70f203ddca5f2ac1dea7bc3a6b
|
feat(text): few improvements
|
<1>:<add> return re.sub("#(\w+)", lambda m: " , " + hashtag_processor(m.group(1)), t)
<del> return re.sub("#(\w+)", lambda m: hashtag_processor(m.group(1)), t)
|
# module: dalle_mini.text
def expand_hashtags(t, hashtag_processor):
<0> "Remove # and try to split words"
<1> return re.sub("#(\w+)", lambda m: hashtag_processor(m.group(1)), t)
<2>
|
===========unchanged ref 0===========
at: re
sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
|
dalle_mini.text/TextNormalizer.__call__
|
Modified
|
borisdayma~dalle-mini
|
849c5f392614cb70f203ddca5f2ac1dea7bc3a6b
|
feat(text): few improvements
|
<4>:<del> if not clip:
<5>:<add> # decode and simplify text: see unidecode library
<del> # decode and simplify text: see unidecode library
<6>:<add> t = unidecode(t)
<del> t = unidecode(t)
<19>:<del> if not clip:
<20>:<add> # handle dots in numbers and quotes - Part 1
<del> # handle dots in numbers and quotes - Part 1
<21>:<add> t = pre_process_dot_numbers(t)
<del> t = pre_process_dot_numbers(t)
<22>:<add> t = pre_process_quotes(t)
<del> t = pre_process_quotes(t)
<23>:<add> # handle special characters
<del> # handle special characters
<24>:<add> t = handle_special_chars(t)
<del> t = handle_special_chars(t)
<25>:<add> # handle hashtags
<del> # handle hashtags
<26>:<add> t = expand_hashtags(t, self._hashtag_processor)
<del> t = expand_hashtags(t, self._hashtag_processor)
<27>:<add> # ignore useless characters
<del> # ignore useless characters
<28>:<add> t = ignore_chars(t)
<del> t = ignore_chars(t)
<29>:<add> # simplify quotes
<del> # simplify quotes
<30>:<add> t = simplify_quotes(t)
<del> t = simplify_quotes(t)
<31>:<add> # all punctuation becomes commas
<del> # all punctuation becomes commas
<32>:<add> t = replace_punctuation_with_commas(t)
<del> t = replace_punctuation_with_commas(t)
<33>:<add> # handle dots in numbers and quotes - Part 2
<del> # handle dots in numbers and quotes - Part 2
<34>:<add> t = post_process_dot_numbers(t)
|
# module: dalle_mini.text
class TextNormalizer:
+ def __call__(self, t):
- def __call__(self, t, clip=False):
<0> # fix some characters
<1> t = ftfy.fix_text(t)
<2> # fix html
<3> t = fix_html(t)
<4> if not clip:
<5> # decode and simplify text: see unidecode library
<6> t = unidecode(t)
<7> # lower case
<8> t = t.lower()
<9> # replace <PERSON> (for CC12M)
<10> t = replace_person_token(t)
<11> # remove wiki reference (for WIT)
<12> t = remove_wiki_ref(t)
<13> # remove html tags
<14> t = remove_html_tags(t)
<15> # remove urls
<16> t = remove_urls(t)
<17> # remove commas in numbers
<18> t = remove_comma_numbers(t)
<19> if not clip:
<20> # handle dots in numbers and quotes - Part 1
<21> t = pre_process_dot_numbers(t)
<22> t = pre_process_quotes(t)
<23> # handle special characters
<24> t = handle_special_chars(t)
<25> # handle hashtags
<26> t = expand_hashtags(t, self._hashtag_processor)
<27> # ignore useless characters
<28> t = ignore_chars(t)
<29> # simplify quotes
<30> t = simplify_quotes(t)
<31> # all punctuation becomes commas
<32> t = replace_punctuation_with_commas(t)
<33> # handle dots in numbers and quotes - Part 2
<34> t = post_process_dot_numbers(t)
<35> t = post_process_quotes(t)
<36> # handle repeating characters
<37> t = remove_repeating_chars(t)
<38> # merge commas
<39> t = merge_commas(t)
<40> # merge quotes
<41> t = merge_quotes(t)
<42> # remove multiple spaces
<43> t = remove_extra_spaces</s>
|
===========below chunk 0===========
# module: dalle_mini.text
class TextNormalizer:
+ def __call__(self, t):
- def __call__(self, t, clip=False):
# offset: 1
# remove first and last comma
t = remove_first_last_commas(t)
# always start with a space
return f" {t}" if not clip else t
===========unchanged ref 0===========
at: dalle_mini.text
replace_person_token(t)
fix_html(t)
replace_punctuation_with_commas(t)
simplify_quotes(t)
merge_quotes(t)
remove_comma_numbers(t)
pre_process_dot_numbers(t)
post_process_dot_numbers(t)
pre_process_quotes(t)
post_process_quotes(t)
merge_commas(t)
handle_special_chars(t)
expand_hashtags(t, hashtag_processor)
ignore_chars(t)
remove_extra_spaces(t)
remove_repeating_chars(t)
remove_urls(t)
remove_html_tags(t)
remove_first_last_commas(t)
remove_wiki_ref(t)
at: dalle_mini.text.TextNormalizer.__init__
self._hashtag_processor = HashtagProcessor()
===========changed ref 0===========
# module: dalle_mini.text
def expand_hashtags(t, hashtag_processor):
"Remove # and try to split words"
+ return re.sub("#(\w+)", lambda m: " , " + hashtag_processor(m.group(1)), t)
- return re.sub("#(\w+)", lambda m: hashtag_processor(m.group(1)), t)
|
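A few of the pipeline steps above chained on a toy caption (hand-inlined, simplified stand-ins for the real helpers):

import re

t = "The   Cat &amp; the Hat!!!"
t = t.lower()
t = t.replace("&amp;", "&")                      # stand-in for fix_html
t = re.sub(r"([%&/$])", r" \1 ", t)              # handle_special_chars
t = re.sub(r"[()\[\].,|:;?!=+~\-]", ",", t)      # replace_punctuation_with_commas
t = re.sub(r",+", ",", t)                        # merge_commas, simplified
t = re.sub(r"\s+", " ", t).strip(" ,")           # extra spaces + first/last commas
assert t == "the cat & the hat"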
dalle_mini.text/handle_special_chars
|
Modified
|
borisdayma~dalle-mini
|
229cdc008b48e63883f5d06792cc10d76faad624
|
feat: keep %
|
<3>:<add> # always add space around & or %
<del> # always add space around &
<4>:<add> return re.sub("([%&])", r" \1 ", t)
<del> return re.sub("&", " & ", t)
|
# module: dalle_mini.text
def handle_special_chars(t):
<0> "Handle special characters"
<1> # replace "-" with a space when between words without space
<2> t = re.sub("([a-zA-Z])-([a-zA-Z])", r"\1 \2", t)
<3> # always add space around &
<4> return re.sub("&", " & ", t)
<5>
|
===========unchanged ref 0===========
at: re
sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
|
dalle_mini.text/handle_special_chars
|
Modified
|
borisdayma~dalle-mini
|
046560574264b52e991fcfb1c9da9f2b2f4aaa24
|
feat(text): handle dates & prices
|
<3>:<add> # always add space around & or % or / or $
<del> # always add space around & or %
<4>:<add> return re.sub("([%&\/$])", r" \1 ", t)
<del> return re.sub("([%&])", r" \1 ", t)
|
# module: dalle_mini.text
def handle_special_chars(t):
<0> "Handle special characters"
<1> # replace "-" with a space when between words without space
<2> t = re.sub("([a-zA-Z])-([a-zA-Z])", r"\1 \2", t)
<3> # always add space around & or %
<4> return re.sub("([%&])", r" \1 ", t)
<5>
|
===========unchanged ref 0===========
at: re
sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
|
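Effect of the growing character set in handle_special_chars (here the dates-and-prices version), spacing out $ and / so they become standalone tokens:

import re
assert re.sub(r"([%&/$])", r" \1 ", "price:$5/day") == "price: $ 5 / day"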
dalle_mini.text/replace_punctuation_with_commas
|
Modified
|
borisdayma~dalle-mini
|
7b58e88dce169d02ef7eefe04d732be7bc7620cf
|
feat(text): improvements on pre-processing
|
<0>:<add> return re.sub("([()[\].,|:;?!=+~\-\/])", ",", t)
<del> return re.sub("""([()[\].,|:;?!=+~\-])""", ",", t)
|
# module: dalle_mini.text
def replace_punctuation_with_commas(t):
<0> return re.sub("""([()[\].,|:;?!=+~\-])""", ",", t)
<1>
|
===========unchanged ref 0===========
at: re
sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
|
dalle_mini.text/pre_process_dot_numbers
|
Modified
|
borisdayma~dalle-mini
|
7b58e88dce169d02ef7eefe04d732be7bc7620cf
|
feat(text): improvements on pre-processing
|
<0>:<add> return re.sub("(\w)\.(\w)", fr"\1{temp_token}dot{temp_token}\2", t)
<del> return re.sub("(\d)\.(\d)", fr"\1{temp_token}dot{temp_token}\2", t)
|
# module: dalle_mini.text
def pre_process_dot_numbers(t):
<0> return re.sub("(\d)\.(\d)", fr"\1{temp_token}dot{temp_token}\2", t)
<1>
|
===========unchanged ref 0===========
at: dalle_mini.text
temp_token = "xtokx" # avoid repeating chars
at: re
sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
===========changed ref 0===========
# module: dalle_mini.text
def replace_punctuation_with_commas(t):
+ return re.sub("([()[\].,|:;?!=+~\-\/])", ",", t)
- return re.sub("""([()[\].,|:;?!=+~\-])""", ",", t)
|
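The temp_token protect/restore trick carried through pre_process_dot_numbers and the comma pass, end to end (simplified inline versions of the helpers):

import re
temp_token = "xtokx"

t = "v1.2, done."
t = re.sub(r"(\w)\.(\w)", rf"\1{temp_token}dot{temp_token}\2", t)  # protect 1.2
t = re.sub(r"[()\[\].,|:;?!=+~\-/]", ",", t)                       # punctuation -> commas
t = re.sub(rf"{temp_token}dot{temp_token}", ".", t)                # post_process: restore
assert t == "v1.2, done,"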
dalle_mini.text/handle_special_chars
|
Modified
|
borisdayma~dalle-mini
|
7b58e88dce169d02ef7eefe04d732be7bc7620cf
|
feat(text): improvements on pre-processing
|
<2>:<add> t = re.sub("(\w)-(\w)", r"\1 \2", t)
<del> t = re.sub("([a-zA-Z])-([a-zA-Z])", r"\1 \2", t)
<3>:<add> # always add space around some characters
<del> # always add space around & or % or / or $
<4>:<add> return re.sub("([%&\/$*])", r" \1 ", t)
<del> return re.sub("([%&\/$])", r" \1 ", t)
|
# module: dalle_mini.text
def handle_special_chars(t):
<0> "Handle special characters"
<1> # replace "-" with a space when between words without space
<2> t = re.sub("([a-zA-Z])-([a-zA-Z])", r"\1 \2", t)
<3> # always add space around & or % or / or $
<4> return re.sub("([%&\/$])", r" \1 ", t)
<5>
|
===========unchanged ref 0===========
at: re
sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
===========changed ref 0===========
# module: dalle_mini.text
+ def post_process_dates(t):
+ return re.sub(f"{temp_token}slash{temp_token}", "/", t)
+
===========changed ref 1===========
# module: dalle_mini.text
+ def pre_process_dates(t):
+ return re.sub("(\d)/(\d)", fr"\1{temp_token}slash{temp_token}\2", t)
+
===========changed ref 2===========
# module: dalle_mini.text
def pre_process_dot_numbers(t):
+ return re.sub("(\w)\.(\w)", fr"\1{temp_token}dot{temp_token}\2", t)
- return re.sub("(\d)\.(\d)", fr"\1{temp_token}dot{temp_token}\2", t)
===========changed ref 3===========
# module: dalle_mini.text
def replace_punctuation_with_commas(t):
+ return re.sub("([()[\].,|:;?!=+~\-\/])", ",", t)
- return re.sub("""([()[\].,|:;?!=+~\-])""", ",", t)
|
dalle_mini.text/expand_hashtags
|
Modified
|
borisdayma~dalle-mini
|
7b58e88dce169d02ef7eefe04d732be7bc7620cf
|
feat(text): improvements on pre-processing
|
<1>:<add> return re.sub("#(\w+)", lambda m: hashtag_processor(m.group(1)), t)
<del> return re.sub("#(\w+)", lambda m: " , " + hashtag_processor(m.group(1)), t)
|
# module: dalle_mini.text
def expand_hashtags(t, hashtag_processor):
<0> "Remove # and try to split words"
<1> return re.sub("#(\w+)", lambda m: " , " + hashtag_processor(m.group(1)), t)
<2>
|
===========changed ref 0===========
# module: dalle_mini.text
+ def post_process_dates(t):
+ return re.sub(f"{temp_token}slash{temp_token}", "/", t)
+
===========changed ref 1===========
# module: dalle_mini.text
+ def pre_process_dates(t):
+ return re.sub("(\d)/(\d)", fr"\1{temp_token}slash{temp_token}\2", t)
+
===========changed ref 2===========
# module: dalle_mini.text
def pre_process_dot_numbers(t):
+ return re.sub("(\w)\.(\w)", fr"\1{temp_token}dot{temp_token}\2", t)
- return re.sub("(\d)\.(\d)", fr"\1{temp_token}dot{temp_token}\2", t)
===========changed ref 3===========
# module: dalle_mini.text
def replace_punctuation_with_commas(t):
+ return re.sub("([()[\].,|:;?!=+~\-\/])", ",", t)
- return re.sub("""([()[\].,|:;?!=+~\-])""", ",", t)
===========changed ref 4===========
# module: dalle_mini.text
def handle_special_chars(t):
"Handle special characters"
# replace "-" with a space when between words without space
+ t = re.sub("(\w)-(\w)", r"\1 \2", t)
- t = re.sub("([a-zA-Z])-([a-zA-Z])", r"\1 \2", t)
+ # always add space around some characters
- # always add space around & or % or / or $
+ return re.sub("([%&\/$*])", r" \1 ", t)
- return re.sub("([%&\/$])", r" \1 ", t)
|
dalle_mini.text/TextNormalizer.__call__
|
Modified
|
borisdayma~dalle-mini
|
7b58e88dce169d02ef7eefe04d732be7bc7620cf
|
feat(text): improvements on pre-processing
|
<21>:<add> t = pre_process_dates(t)
<34>:<add> t = post_process_dates(t)
|
# module: dalle_mini.text
class TextNormalizer:
def __call__(self, t):
<0> # fix some characters
<1> t = ftfy.fix_text(t)
<2> # fix html
<3> t = fix_html(t)
<4> # decode and simplify text: see unidecode library
<5> t = unidecode(t)
<6> # lower case
<7> t = t.lower()
<8> # replace <PERSON> (for CC12M)
<9> t = replace_person_token(t)
<10> # remove wiki reference (for WIT)
<11> t = remove_wiki_ref(t)
<12> # remove html tags
<13> t = remove_html_tags(t)
<14> # remove urls
<15> t = remove_urls(t)
<16> # remove commas in numbers
<17> t = remove_comma_numbers(t)
<18> # handle dots in numbers and quotes - Part 1
<19> t = pre_process_dot_numbers(t)
<20> t = pre_process_quotes(t)
<21> # handle special characters
<22> t = handle_special_chars(t)
<23> # handle hashtags
<24> t = expand_hashtags(t, self._hashtag_processor)
<25> # ignore useless characters
<26> t = ignore_chars(t)
<27> # simplify quotes
<28> t = simplify_quotes(t)
<29> # all punctuation becomes commas
<30> t = replace_punctuation_with_commas(t)
<31> # handle dots in numbers and quotes - Part 2
<32> t = post_process_dot_numbers(t)
<33> t = post_process_quotes(t)
<34> # handle repeating characters
<35> t = remove_repeating_chars(t)
<36> # merge quotes
<37> t = merge_quotes(t)
<38> # merge commas
<39> t = merge_commas(t)
<40> # remove multiple spaces
<41> t = remove_extra_spaces(t)
<42> # remove first and last comma
<43> t = remove_first_last_commas(t)
<44> </s>
|
===========below chunk 0===========
# module: dalle_mini.text
class TextNormalizer:
def __call__(self, t):
# offset: 1
return f" {t}"
===========unchanged ref 0===========
at: dalle_mini.text
HashtagProcessor()
replace_person_token(t)
fix_html(t)
replace_punctuation_with_commas(t)
simplify_quotes(t)
remove_comma_numbers(t)
pre_process_dot_numbers(t)
post_process_dot_numbers(t)
pre_process_quotes(t)
post_process_quotes(t)
pre_process_dates(t)
post_process_dates(t)
handle_special_chars(t)
expand_hashtags(t, hashtag_processor)
ignore_chars(t)
remove_repeating_chars(t)
remove_urls(t)
remove_html_tags(t)
remove_wiki_ref(t)
===========changed ref 0===========
# module: dalle_mini.text
+ def post_process_dates(t):
+ return re.sub(f"{temp_token}slash{temp_token}", "/", t)
+
===========changed ref 1===========
# module: dalle_mini.text
+ def pre_process_dates(t):
+ return re.sub("(\d)/(\d)", fr"\1{temp_token}slash{temp_token}\2", t)
+
===========changed ref 2===========
# module: dalle_mini.text
def expand_hashtags(t, hashtag_processor):
"Remove # and try to split words"
+ return re.sub("#(\w+)", lambda m: hashtag_processor(m.group(1)), t)
- return re.sub("#(\w+)", lambda m: " , " + hashtag_processor(m.group(1)), t)
===========changed ref 3===========
# module: dalle_mini.text
def pre_process_dot_numbers(t):
+ return re.sub("(\w)\.(\w)", fr"\1{temp_token}dot{temp_token}\2", t)
- return re.sub("(\d)\.(\d)", fr"\1{temp_token}dot{temp_token}\2", t)
===========changed ref 4===========
# module: dalle_mini.text
def replace_punctuation_with_commas(t):
+ return re.sub("([()[\].,|:;?!=+~\-\/])", ",", t)
- return re.sub("""([()[\].,|:;?!=+~\-])""", ",", t)
===========changed ref 5===========
# module: dalle_mini.text
def handle_special_chars(t):
"Handle special characters"
# replace "-" with a space when between words without space
+ t = re.sub("(\w)-(\w)", r"\1 \2", t)
- t = re.sub("([a-zA-Z])-([a-zA-Z])", r"\1 \2", t)
+ # always add space around some characters
- # always add space around & or % or / or $
+ return re.sub("([%&\/$*])", r" \1 ", t)
- return re.sub("([%&\/$])", r" \1 ", t)
===========changed ref 6===========
# module: dalle_mini.text
+ _re_ignore_chars = r"[_#\\]"
- _re_ignore_chars = """[_#\\]"""
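The ordering inside __call__ is load-bearing: the pre_* passes must run before replace_punctuation_with_commas (which would otherwise destroy "." and "/"), and the post_* passes after it. A hedged sketch of that bracket, using names from the unchanged ref:

t = pre_process_dot_numbers(t)          # '1.5'  -> '1xtokxdotxtokx5'
t = pre_process_dates(t)                # '9/11' -> '9xtokxslashxtokx11'
# ... intermediate passes ...
t = replace_punctuation_with_commas(t)  # '.' and '/' would become ',' if unprotected
t = post_process_dot_numbers(t)         # restore '.'
t = post_process_dates(t)               # restore '/'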
|
dalle_mini.text/replace_punctuation_with_commas
|
Modified
|
borisdayma~dalle-mini
|
bf25d32a6e0d7ee8bb5d1b6f2e748b9ead6124dd
|
feat(text): more char
|
<0>:<add> return re.sub("[()[\].,|:;?!=+~\-\/{}]", ",", t)
<del> return re.sub("([()[\].,|:;?!=+~\-\/])", ",", t)
|
# module: dalle_mini.text
def replace_punctuation_with_commas(t):
<0> return re.sub("([()[\].,|:;?!=+~\-\/])", ",", t)
<1>
|
===========unchanged ref 0===========
at: re
sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
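Since the replacement is the literal ",", the capturing group added nothing; the bare character class behaves identically while also covering "{" and "}". Illustrative check:

import re

print(re.sub("[()[\].,|:;?!=+~\-\/{}]", ",", "hello {world}!"))  # 'hello ,world,,'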
|
dalle_mini.model/CustomFlaxBartModule.setup
|
Modified
|
borisdayma~dalle-mini
|
803c7df211b776d5c26f49ff42b54dd5870b944f
|
feat: use model definition
|
<0>:<del> # check config is valid, otherwise set default values
<1>:<del> self.config.vocab_size_output = getattr(self.config, 'vocab_size_output', OUTPUT_VOCAB_SIZE)
<2>:<del> self.config.max_position_embeddings_decoder = getattr(self.config, 'max_position_embeddings_decoder', OUTPUT_LENGTH)
<3>:<del>
<13>:<add> self.config.image_vocab_size + 1,
<del> self.config.vocab_size_output,
<18>:<add> self.encoder = FlaxBartEncoder(
<add> self.config, dtype=self.dtype, embed_tokens=self.shared
<del> self.encoder = FlaxBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
<19>:<add> )
<22>:<add> decoder_config.max_position_embeddings = (
<add> self.config.image_length + 1 # image tokens + BOS
<add> )
<del> decoder_config.max_position_embeddings = self.config.max_position_embeddings_decoder
<23>:<add> decoder_config.vocab_size = self.config.image_vocab_size + 1
<del> decoder_config.vocab_size = self.config.vocab_size_output
<24>:<add> self.decoder = FlaxBartDecoder(
<add> decoder_config, dtype=self.dtype, embed_tokens=self.decoder_embed
<del> self.decoder = FlaxBartDecoder(decoder_config, dtype=self.dtype, embed_tokens=self.decoder_embed)
<25>:<add> )
|
# module: dalle_mini.model
class CustomFlaxBartModule(FlaxBartModule):
def setup(self):
<0> # check config is valid, otherwise set default values
<1> self.config.vocab_size_output = getattr(self.config, 'vocab_size_output', OUTPUT_VOCAB_SIZE)
<2> self.config.max_position_embeddings_decoder = getattr(self.config, 'max_position_embeddings_decoder', OUTPUT_LENGTH)
<3>
<4> # we keep shared to easily load pre-trained weights
<5> self.shared = nn.Embed(
<6> self.config.vocab_size,
<7> self.config.d_model,
<8> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<9> dtype=self.dtype,
<10> )
<11> # a separate embedding is used for the decoder
<12> self.decoder_embed = nn.Embed(
<13> self.config.vocab_size_output,
<14> self.config.d_model,
<15> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<16> dtype=self.dtype,
<17> )
<18> self.encoder = FlaxBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
<19>
<20> # the decoder has a different config
<21> decoder_config = BartConfig(self.config.to_dict())
<22> decoder_config.max_position_embeddings = self.config.max_position_embeddings_decoder
<23> decoder_config.vocab_size = self.config.vocab_size_output
<24> self.decoder = FlaxBartDecoder(decoder_config, dtype=self.dtype, embed_tokens=self.decoder_embed)
<25>
|
===========unchanged ref 0===========
at: dalle_mini.model.CustomFlaxBartModule.setup
self.shared = nn.Embed(
self.config.vocab_size,
self.config.d_model,
embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
dtype=self.dtype,
)
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
to_dict() -> Dict[str, Any]
at: transformers.models.bart.configuration_bart
BartConfig(vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule
config: BartConfig
===========unchanged ref 1===========
dtype: jnp.dtype = jnp.float32
bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
setup(self)
at: transformers.models.bart.modeling_flax_bart.FlaxBartModule
config: BartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
===========changed ref 0===========
# module: dalle_mini.model
- # Model hyperparameters, for convenience
- OUTPUT_VOCAB_SIZE = 16384 + 1 # encoded image token space + 1 for bos
- OUTPUT_LENGTH = 256 + 1 # number of encoded tokens + 1 for bos
- BOS_TOKEN_ID = 16384
- BASE_MODEL = 'facebook/bart-large-cnn' # we currently have issues with bart-large
-
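Hedged arithmetic check against the constants this commit deletes: the new config fields reproduce the old defaults when image_vocab_size = 16384 and image_length = 256.

decoder_vocab = 16384 + 1    # image codebook + BOS (== removed OUTPUT_VOCAB_SIZE)
decoder_positions = 256 + 1  # image tokens + BOS   (== removed OUTPUT_LENGTH)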
|
dalle_mini.model/CustomFlaxBartForConditionalGenerationModule.setup
|
Modified
|
borisdayma~dalle-mini
|
803c7df211b776d5c26f49ff42b54dd5870b944f
|
feat: use model definition
|
<1>:<add> # TODO: simplify with custom config class
<add> self.config.text_normalized = True / False
<del> self.config.vocab_size_output = getattr(self.config, 'vocab_size_output', OUTPUT_VOCAB_SIZE)
<5>:<add> self.config.image_vocab_size + 1, # encoded image token space + 1 for bos
<del> self.config.vocab_size_output,
<10>:<add> self.final_logits_bias = self.param(
<add> "final_logits_bias", self.bias_init, (1, self.config.image_vocab_size + 1)
<add> )
<del> self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.config.vocab_size_output))
|
# module: dalle_mini.model
+ class CustomFlaxBartForConditionalGenerationModule(
+ FlaxBartForConditionalGenerationModule
+ ):
- class CustomFlaxBartForConditionalGenerationModule(FlaxBartForConditionalGenerationModule):
def setup(self):
<0> # check config is valid, otherwise set default values
<1> self.config.vocab_size_output = getattr(self.config, 'vocab_size_output', OUTPUT_VOCAB_SIZE)
<2>
<3> self.model = CustomFlaxBartModule(config=self.config, dtype=self.dtype)
<4> self.lm_head = nn.Dense(
<5> self.config.vocab_size_output,
<6> use_bias=False,
<7> dtype=self.dtype,
<8> kernel_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<9> )
<10> self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.config.vocab_size_output))
<11>
|
===========unchanged ref 0===========
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule
config: BartConfig
dtype: jnp.dtype = jnp.float32
bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
===========changed ref 0===========
# module: dalle_mini.model
- # Model hyperparameters, for convenience
- OUTPUT_VOCAB_SIZE = 16384 + 1 # encoded image token space + 1 for bos
- OUTPUT_LENGTH = 256 + 1 # number of encoded tokens + 1 for bos
- BOS_TOKEN_ID = 16384
- BASE_MODEL = 'facebook/bart-large-cnn' # we currently have issues with bart-large
-
===========changed ref 1===========
# module: dalle_mini.model
class CustomFlaxBartModule(FlaxBartModule):
def setup(self):
- # check config is valid, otherwise set default values
- self.config.vocab_size_output = getattr(self.config, 'vocab_size_output', OUTPUT_VOCAB_SIZE)
- self.config.max_position_embeddings_decoder = getattr(self.config, 'max_position_embeddings_decoder', OUTPUT_LENGTH)
-
# we keep shared to easily load pre-trained weights
self.shared = nn.Embed(
self.config.vocab_size,
self.config.d_model,
embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
dtype=self.dtype,
)
# a separate embedding is used for the decoder
self.decoder_embed = nn.Embed(
+ self.config.image_vocab_size + 1,
- self.config.vocab_size_output,
self.config.d_model,
embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
dtype=self.dtype,
)
+ self.encoder = FlaxBartEncoder(
+ self.config, dtype=self.dtype, embed_tokens=self.shared
- self.encoder = FlaxBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
+ )
# the decoder has a different config
decoder_config = BartConfig(self.config.to_dict())
+ decoder_config.max_position_embeddings = (
+ self.config.image_length + 1 # image tokens + BOS
+ )
- decoder_config.max_position_embeddings = self.config.max_position_embeddings_decoder
+ decoder_config.vocab_size = self.config.image_vocab_size + 1
- decoder_config.vocab_size = self.config.vocab_size_output
+ self.decoder = FlaxBartDecoder</s>
===========changed ref 2===========
# module: dalle_mini.model
class CustomFlaxBartModule(FlaxBartModule):
def setup(self):
# offset: 1
<s> decoder_config.vocab_size = self.config.vocab_size_output
+ self.decoder = FlaxBartDecoder(
+ decoder_config, dtype=self.dtype, embed_tokens=self.decoder_embed
- self.decoder = FlaxBartDecoder(decoder_config, dtype=self.dtype, embed_tokens=self.decoder_embed)
+ )
|
dev.seq2seq.run_seq2seq_flax/DataTrainingArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
803c7df211b776d5c26f49ff42b54dd5870b944f
|
feat: use model definition
|
<21>:<del> if self.val_max_target_length is None:
<22>:<del> self.val_max_target_length = self.max_target_length
|
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
def __post_init__(self):
<0> if self.dataset_repo_or_path is None:
<1> raise ValueError("Need a dataset repository or path.")
<2> if self.train_file is None or self.validation_file is None:
<3> raise ValueError("Need training/validation file.")
<4> else:
<5> if self.train_file is not None:
<6> extension = self.train_file.split(".")[-1]
<7> assert extension in [
<8> "tsv",
<9> "csv",
<10> "json",
<11> "jsonl",
<12> ], "`train_file` should be a tsv, csv or json file."
<13> if self.validation_file is not None:
<14> extension = self.validation_file.split(".")[-1]
<15> assert extension in [
<16> "tsv",
<17> "csv",
<18> "json",
<19> "jsonl",
<20> ], "`validation_file` should be a tsv, csv or json file."
<21> if self.val_max_target_length is None:
<22> self.val_max_target_length = self.max_target_length
<23> if self.streaming and (self.len_train is None or self.len_eval is None):
<24> raise ValueError(
<25> "Streaming requires providing length of training and validation datasets"
<26> )
<27>
|
===========unchanged ref 0===========
at: io.TextIOWrapper
read(self, size: Optional[int]=..., /) -> str
at: json
load(fp: SupportsRead[Union[str, bytes]], *, cls: Optional[Type[JSONDecoder]]=..., object_hook: Optional[Callable[[Dict[Any, Any]], Any]]=..., parse_float: Optional[Callable[[str], Any]]=..., parse_int: Optional[Callable[[str], Any]]=..., parse_constant: Optional[Callable[[str], Any]]=..., object_pairs_hook: Optional[Callable[[List[Tuple[Any, Any]]], Any]]=..., **kwds: Any) -> Any
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
open(mode: OpenBinaryMode, buffering: Literal[0], encoding: None=..., errors: None=..., newline: None=...) -> FileIO
open(mode: OpenBinaryModeReading, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedReader
open(mode: OpenBinaryModeUpdating, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedRandom
open(mode: OpenBinaryMode, buffering: int, encoding: None=..., errors: None=..., newline: None=...) -> BinaryIO
open(mode: str, buffering: int=..., encoding: Optional[str]=..., errors: Optional[str]=..., newline: Optional[str]=...) -> IO[Any]
open(mode: OpenBinaryModeWriting, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedWriter
open(mode: OpenTextMode=..., buffering: int=..., encoding: Optional[str]=..., errors: Optional[str]=..., newline: Optional[str]=...) -> TextIOWrapper
===========changed ref 0===========
# module: dev.seq2seq.run_seq2seq_flax
+ logger = logging.getLogger(__name__)
- logger = pylogging.getLogger(__name__)
-
- # Model hyperparameters, for convenience
- # TODO: the model now has its own definition file and should be imported
- OUTPUT_VOCAB_SIZE = 16384 + 1 # encoded image token space + 1 for bos
- OUTPUT_LENGTH = 256 + 1 # number of encoded tokens + 1 for bos
- BOS_TOKEN_ID = 16384
- BASE_MODEL = "facebook/bart-large-cnn" # we currently have issues with bart-large
-
===========changed ref 1===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
+ default=None,
- default=BASE_MODEL,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
+ image_vocab_size: Optional[int] = field(
+ default=None,
+ metadata={"help": "Vocab size of image encoder"},
+ )
+ image_length: Optional[int] = field(
+ default=None,
+ metadata={"help": "Number of tokens per image"},
+ )
+ tokenizer_name: Optional[str] = field(
- config_name: Optional[str] = field(
default=None,
metadata={
+ "help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
- "help": "Pretrained config name or path if not the same as model_name"
},
)
+ normalize_text: bool = field(
- use_fast_tokenizer: bool = field(
+ default=False,
- default=True,
- metadata={
- "help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."
- },
+ metadata={"help": "Whether to normalize text or not."},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
)
from_checkpoint: Optional[str] = field(
default=None,
metadata={
"help": "Loads a pretrained wandb checkpoint. Use artifact reference</s>
===========changed ref 2===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class ModelArguments:
# offset: 1
<s> field(
default=None,
metadata={
"help": "Loads a pretrained wandb checkpoint. Use artifact reference."
},
)
===========changed ref 3===========
# module: dalle_mini.model
-
-
===========changed ref 4===========
# module: dalle_mini.model
- # Model hyperparameters, for convenience
- OUTPUT_VOCAB_SIZE = 16384 + 1 # encoded image token space + 1 for bos
- OUTPUT_LENGTH = 256 + 1 # number of encoded tokens + 1 for bos
- BOS_TOKEN_ID = 16384
- BASE_MODEL = 'facebook/bart-large-cnn' # we currently have issues with bart-large
-
===========changed ref 5===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
text_column: Optional[str] = field(
default="caption",
metadata={
"help": "The name of the column in the datasets containing the full texts (for summarization)."
},
)
encoding_column: Optional[str] = field(
default="encoding",
metadata={
"help": "The name of the column in the datasets containing the image encodings."
},
)
dataset_repo_or_path: Optional[str] = field(
default=None,
metadata={"help": "The dataset repository containing encoded files."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."
},
)
streaming: bool = field(
default=False,
metadata={"help": "Whether to stream the dataset."},
)
len_train: Optional[int] = field(
default=None,
metadata={"help": "Length of training dataset, required for streaming"},
)
len_eval: Optional[int] = field(
default=None,
metadata={"help": "Length of validation dataset, required for streaming"},
)
max_source_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
no_decay: bool = field(
default=False,
metadata={"help": "Whether to use decay in the learning rate scheduler."},
)
- max_target</s>
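A minimal self-contained sketch of the __post_init__ validation pattern used here (field name matches the record; the file-extension asserts shown above are dropped again in a later commit):

from dataclasses import dataclass
from typing import Optional

@dataclass
class Args:
    dataset_repo_or_path: Optional[str] = None

    def __post_init__(self):
        if self.dataset_repo_or_path is None:
            raise ValueError("Need a dataset repository or path.")

Args(dataset_repo_or_path="user/dataset")  # ok; Args() would raise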
|
dalle_mini.model/CustomFlaxBartModule.setup
|
Modified
|
borisdayma~dalle-mini
|
b7d8724b49aa3f29281009c7d343f1d2137fd835
|
fix: correct use of dtype
|
<4>:<add> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<del> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<5>:<del> dtype=self.dtype,
<11>:<add> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<del> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<12>:<del> dtype=self.dtype,
<19>:<add> # TODO: should not be needed once we have custom config/module
|
# module: dalle_mini.model
class CustomFlaxBartModule(FlaxBartModule):
def setup(self):
<0> # we keep shared to easily load pre-trained weights
<1> self.shared = nn.Embed(
<2> self.config.vocab_size,
<3> self.config.d_model,
<4> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<5> dtype=self.dtype,
<6> )
<7> # a separate embedding is used for the decoder
<8> self.decoder_embed = nn.Embed(
<9> self.config.image_vocab_size + 1,
<10> self.config.d_model,
<11> embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<12> dtype=self.dtype,
<13> )
<14> self.encoder = FlaxBartEncoder(
<15> self.config, dtype=self.dtype, embed_tokens=self.shared
<16> )
<17>
<18> # the decoder has a different config
<19> decoder_config = BartConfig(self.config.to_dict())
<20> decoder_config.max_position_embeddings = (
<21> self.config.image_length + 1 # image tokens + BOS
<22> )
<23> decoder_config.vocab_size = self.config.image_vocab_size + 1
<24> self.decoder = FlaxBartDecoder(
<25> decoder_config, dtype=self.dtype, embed_tokens=self.decoder_embed
<26> )
<27>
|
===========unchanged ref 0===========
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
to_dict() -> Dict[str, Any]
at: transformers.models.bart.configuration_bart
BartConfig(vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartModule
config: BartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
|
dalle_mini.model/CustomFlaxBartForConditionalGenerationModule.setup
|
Modified
|
borisdayma~dalle-mini
|
b7d8724b49aa3f29281009c7d343f1d2137fd835
|
fix: correct use of dtype
|
<0>:<del> # check config is valid, otherwise set default values
<1>:<del> # TODO: simplify with custom config class
<2>:<del> self.config.text_normalized = True / False
<3>:<del>
<8>:<del> dtype=self.dtype,
<9>:<add> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<del> kernel_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
|
# module: dalle_mini.model
class CustomFlaxBartForConditionalGenerationModule(
FlaxBartForConditionalGenerationModule
):
def setup(self):
<0> # check config is valid, otherwise set default values
<1> # TODO: simplify with custom config class
<2> self.config.text_normalized = True / False
<3>
<4> self.model = CustomFlaxBartModule(config=self.config, dtype=self.dtype)
<5> self.lm_head = nn.Dense(
<6> self.config.image_vocab_size + 1, # encoded image token space + 1 for bos
<7> use_bias=False,
<8> dtype=self.dtype,
<9> kernel_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
<10> )
<11> self.final_logits_bias = self.param(
<12> "final_logits_bias", self.bias_init, (1, self.config.image_vocab_size + 1)
<13> )
<14>
|
===========unchanged ref 0===========
at: dalle_mini.model
CustomFlaxBartForConditionalGeneration(config: BartConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart
FlaxBartForConditionalGeneration(config: BartConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs)
at: transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGeneration
module_class = FlaxBartForConditionalGenerationModule
dtype: jnp.dtype = jnp.float32
at: transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule
config: BartConfig
dtype: jnp.dtype = jnp.float32
bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
===========changed ref 0===========
# module: dalle_mini.model
class CustomFlaxBartModule(FlaxBartModule):
def setup(self):
# we keep shared to easily load pre-trained weights
self.shared = nn.Embed(
self.config.vocab_size,
self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
- embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
- dtype=self.dtype,
)
# a separate embedding is used for the decoder
self.decoder_embed = nn.Embed(
self.config.image_vocab_size + 1,
self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
- embedding_init=jax.nn.initializers.normal(self.config.init_std, self.dtype),
- dtype=self.dtype,
)
self.encoder = FlaxBartEncoder(
self.config, dtype=self.dtype, embed_tokens=self.shared
)
# the decoder has a different config
+ # TODO: should not be needed once we have custom config/module
decoder_config = BartConfig(self.config.to_dict())
decoder_config.max_position_embeddings = (
self.config.image_length + 1 # image tokens + BOS
)
decoder_config.vocab_size = self.config.image_vocab_size + 1
self.decoder = FlaxBartDecoder(
decoder_config, dtype=self.dtype, embed_tokens=self.decoder_embed
)
|
dev.seq2seq.run_seq2seq_flax/DataTrainingArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
87fac28eae3c534ab30f3db10e737b7c3a3563a2
|
feat: simplify parameters
|
<2>:<del> if self.train_file is None or self.validation_file is None:
<3>:<del> raise ValueError("Need training/validation file.")
<4>:<del> else:
<5>:<del> if self.train_file is not None:
<6>:<del> extension = self.train_file.split(".")[-1]
<7>:<del> assert extension in [
<8>:<del> "tsv",
<9>:<del> "csv",
<10>:<del> "json",
<11>:<del> "jsonl",
<12>:<del> ], "`train_file` should be a tsv, csv or json file."
<13>:<del> if self.validation_file is not None:
<14>:<del> extension = self.validation_file.split(".")[-1]
<15>:<del> assert extension in [
<16>:<del> "tsv",
<17>:<del> "csv",
<18>:<del> "json",
<19>:<del> "jsonl",
<20>:<del> ], "`validation_file` should be a tsv, csv or json file."
<21>:<del> if self.streaming and (self.len_train is None or self.len_eval is None):
<22>:<del> raise ValueError(
<23>:<del> "Streaming requires providing length of training and validation datasets"
<24>:<del> )
|
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
def __post_init__(self):
<0> if self.dataset_repo_or_path is None:
<1> raise ValueError("Need a dataset repository or path.")
<2> if self.train_file is None or self.validation_file is None:
<3> raise ValueError("Need training/validation file.")
<4> else:
<5> if self.train_file is not None:
<6> extension = self.train_file.split(".")[-1]
<7> assert extension in [
<8> "tsv",
<9> "csv",
<10> "json",
<11> "jsonl",
<12> ], "`train_file` should be a tsv, csv or json file."
<13> if self.validation_file is not None:
<14> extension = self.validation_file.split(".")[-1]
<15> assert extension in [
<16> "tsv",
<17> "csv",
<18> "json",
<19> "jsonl",
<20> ], "`validation_file` should be a tsv, csv or json file."
<21> if self.streaming and (self.len_train is None or self.len_eval is None):
<22> raise ValueError(
<23> "Streaming requires providing length of training and validation datasets"
<24> )
<25>
|
===========unchanged ref 0===========
at: datasets.arrow_dataset
Dataset(arrow_table: Table, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, indices_table: Optional[Table]=None, fingerprint: Optional[str]=None)
at: io.TextIOWrapper
read(self, size: Optional[int]=..., /) -> str
at: json
load(fp: SupportsRead[Union[str, bytes]], *, cls: Optional[Type[JSONDecoder]]=..., object_hook: Optional[Callable[[Dict[Any, Any]], Any]]=..., parse_float: Optional[Callable[[str], Any]]=..., parse_int: Optional[Callable[[str], Any]]=..., parse_constant: Optional[Callable[[str], Any]]=..., object_pairs_hook: Optional[Callable[[List[Tuple[Any, Any]]], Any]]=..., **kwds: Any) -> Any
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
===========unchanged ref 1===========
open(mode: OpenBinaryMode, buffering: Literal[0], encoding: None=..., errors: None=..., newline: None=...) -> FileIO
open(mode: OpenBinaryModeReading, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedReader
open(mode: OpenBinaryModeUpdating, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedRandom
open(mode: OpenBinaryMode, buffering: int, encoding: None=..., errors: None=..., newline: None=...) -> BinaryIO
open(mode: str, buffering: int=..., encoding: Optional[str]=..., errors: Optional[str]=..., newline: Optional[str]=...) -> IO[Any]
open(mode: OpenBinaryModeWriting, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedWriter
open(mode: OpenTextMode=..., buffering: int=..., encoding: Optional[str]=..., errors: Optional[str]=..., newline: Optional[str]=...) -> TextIOWrapper
===========changed ref 0===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
text_column: Optional[str] = field(
default="caption",
metadata={
"help": "The name of the column in the datasets containing the full texts (for summarization)."
},
)
encoding_column: Optional[str] = field(
default="encoding",
metadata={
"help": "The name of the column in the datasets containing the image encodings."
},
)
dataset_repo_or_path: Optional[str] = field(
default=None,
metadata={"help": "The dataset repository containing encoded files."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."
},
)
streaming: bool = field(
default=False,
metadata={"help": "Whether to stream the dataset."},
)
len_train: Optional[int] = field(
default=None,
metadata={"help": "Length of training dataset, required for streaming"},
)
len_eval: Optional[int] = field(
default=None,
metadata={"help": "Length of validation dataset, required for streaming"},
)
max_source_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
+ use_decay: bool = field(
- no_decay: bool = field(
default=False,
metadata={"help": "Whether to use decay in the learning rate</s>
===========changed ref 1===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
# offset: 1
<s> no_decay: bool = field(
default=False,
metadata={"help": "Whether to use decay in the learning rate scheduler."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
preprocessing_num_workers: Optional[int] = field(
- default=80, # ensure we have the same datasets cached data and avoid using too much space
- metadata={"help": "The number of processes to use for the preprocessing."},
- )
- source_prefix: Optional[str] = field(
default=None,
metadata={
+ "help": "The number of processes to use for the preprocessing. Not used in streaming mode."
- "help": "A prefix to add before every source text (useful for T5 models)."
},
)
overwrite_cache: bool = field(
default=False,
+ metadata={
+ "help": "Overwrite the cached training and evaluation sets. Not used in streaming mode."
+ },
- metadata={"help": "Overwrite the cached training and evaluation sets"},
)
log_interval: Optional[int] = field(
default=40,
metadata={"help": "Log frequency for metrics"},
)
log_model: bool = field(
default=False,
+ metadata={"help": "Log frequency for model"},
- metadata={"help": "Overwrite the cached</s>
===========changed ref 2===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
# offset: 2
<s> evaluation sets"},
)
save_model_steps: Optional[int] = field(
- default=5000, # about once every 1.5h in our experiments
- metadata={
- "help": "For logging the model more frequently. Used only when `log_model` is set."
- },
+ default=5000,
+ metadata={"help": "For saving/logging the model more frequently"},
)
|
dev.seq2seq.run_seq2seq_flax/create_learning_rate_fn
|
Modified
|
borisdayma~dalle-mini
|
87fac28eae3c534ab30f3db10e737b7c3a3563a2
|
feat: simplify parameters
|
<6>:<add> if not use_decay:
<del> if no_decay:
|
# module: dev.seq2seq.run_seq2seq_flax
def create_learning_rate_fn(
train_ds_size: int,
train_batch_size: int,
num_train_epochs: int,
num_warmup_steps: int,
learning_rate: float,
+ use_decay: bool,
- no_decay: bool,
) -> Callable[[int], jnp.array]:
<0> """Returns a linear warmup, linear_decay learning rate function."""
<1> steps_per_epoch = train_ds_size // train_batch_size
<2> num_train_steps = steps_per_epoch * num_train_epochs
<3> warmup_fn = optax.linear_schedule(
<4> init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps
<5> )
<6> if no_decay:
<7> return warmup_fn
<8> decay_fn = optax.linear_schedule(
<9> init_value=learning_rate,
<10> end_value=0,
<11> transition_steps=num_train_steps - num_warmup_steps,
<12> )
<13> schedule_fn = optax.join_schedules(
<14> schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]
<15> )
<16> return schedule_fn
<17>
|
===========unchanged ref 0===========
at: dev.seq2seq.run_seq2seq_flax
ModelArguments(model_name_or_path: Optional[str]=field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
), image_vocab_size: Optional[int]=field(
default=None,
metadata={"help": "Vocab size of image encoder"},
), image_length: Optional[int]=field(
default=None,
metadata={"help": "Number of tokens per image"},
), tokenizer_name: Optional[str]=field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
), normalize_text: bool=field(
default=False,
metadata={"help": "Whether to normalize text or not."},
), dtype: Optional[str]=field(
default="float32",
metadata={
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
), from_checkpoint: Optional[str]=field(
default=None,
metadata={
"help": "Loads a pretrained wandb checkpoint. Use artifact reference."
},
))
===========unchanged ref 1===========
DataTrainingArguments(text_column: Optional[str]=field(
default="caption",
metadata={
"help": "The name of the column in the datasets containing the full texts (for summarization)."
},
), encoding_column: Optional[str]=field(
default="encoding",
metadata={
"help": "The name of the column in the datasets containing the image encodings."
},
), dataset_repo_or_path: Optional[str]=field(
default=None,
metadata={"help": "The dataset repository containing encoded files."},
), train_file: Optional[str]=field(
default=None, metadata={"help": "The input training data file (a text file)."}
), validation_file: Optional[str]=field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."
},
), streaming: bool=field(
default=False,
metadata={"help": "Whether to stream the dataset."},
), len_train: Optional[int]=field(
default=None,
metadata={"help": "Length of training dataset, required for streaming"},
), len_eval: Optional[int]=field(
default=None,
metadata={"help": "Length of validation dataset, required for streaming"},
), max_source_length: Optional[int]=field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
), no_decay: bool=field(
default=False,
metadata={"help": "Whether to use decay in the learning rate scheduler."},
), max_train_samples: Optional[int]=field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
), max_eval_samples</s>
===========unchanged ref 2===========
at: dev.seq2seq.run_seq2seq_flax.main
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
at: os.path
abspath(path: _PathLike[AnyStr]) -> AnyStr
abspath(path: AnyStr) -> AnyStr
abspath = _abspath_fallback
at: sys
argv: List[str]
at: transformers.hf_argparser
HfArgumentParser(dataclass_types: Union[DataClassType, Iterable[DataClassType]], *, prog: Optional[str]=..., usage: Optional[str]=..., description: Optional[str]=..., epilog: Optional[str]=..., parents: Sequence[ArgumentParser]=..., formatter_class: _FormatterClass=..., prefix_chars: str=..., fromfile_prefix_chars: Optional[str]=..., argument_default: Any=..., conflict_handler: str=..., add_help: bool=..., allow_abbrev: bool=...)
at: transformers.hf_argparser.HfArgumentParser
dataclass_types: Iterable[DataClassType]
parse_json_file(json_file: str, allow_extra_keys: bool=False) -> Tuple[DataClass, ...]
===========unchanged ref 3===========
at: transformers.training_args
TrainingArguments(output_dir: str=field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
), overwrite_output_dir: bool=field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
), do_train: bool=field(default=False, metadata={"help": "Whether to run training."}), do_eval: bool=field(default=False, metadata={"help": "Whether to run eval on the dev set."}), do_predict: bool=field(default=False, metadata={"help": "Whether to run predictions on the test set."}), evaluation_strategy: Union[IntervalStrategy, str]=field(
default="no",
metadata={"help": "The evaluation strategy to use."},
), prediction_loss_only: bool=field(
default=False,
metadata={"help": "When performing evaluation and predictions, only returns the loss."},
), per_device_train_batch_size: int=field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
), per_device_eval_batch_size: int=field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
), per_gpu_train_batch_size: Optional[int]=field(
default=None,
metadata={
"help": (
"Deprecated, the use of `--per_device_train_batch_size` is preferred. "
"Batch size per GPU/TPU core/CPU for training."
)
},
), per_gpu_eval_batch_size: Optional[int]=field(
default=None,
metadata={
"help": (
"Deprecated, the use of `--per_device_eval_batch_size` is preferred. "
"Batch size per GPU/TP</s>
===========unchanged ref 4===========
at: wandb
log = _preinit.PreInitCallable("wandb.log", wandb_sdk.wandb_run.Run.log)
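A runnable sketch of the warmup-plus-decay shape this function builds (illustrative step counts and peak rate, not values from the repo):

import optax

warmup_fn = optax.linear_schedule(init_value=0.0, end_value=3e-4, transition_steps=1000)
decay_fn = optax.linear_schedule(init_value=3e-4, end_value=0.0, transition_steps=9000)
schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[1000])
print(schedule_fn(0), schedule_fn(1000), schedule_fn(10000))  # 0.0, 3e-04, 0.0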
|
dev.seq2seq.run_seq2seq_flax/TrainState.restore_state
|
Modified
|
borisdayma~dalle-mini
|
6523a6d550a36f8e5fc3312bae486c24d2eb2099
|
feat: add metrics + cleanup
|
<4>:<add> # restore other parameters
<del> # restore steps
<7>:<del> new_step = training_state["step"]
<10>:<add> return self.replace(
<add> opt_state=new_opt_state,
<add> step=training_state["step"],
<add> train_time=training_state["train_time"],
<add> train_samples=training_state["train_samples"],
<add> )
<del> return self.replace(step=new_step, opt_state=new_opt_state)
|
# module: dev.seq2seq.run_seq2seq_flax
class TrainState(train_state.TrainState):
def restore_state(self, artifact_dir):
<0> # restore optimizer state
<1> with (Path(artifact_dir) / "opt_state.msgpack").open("rb") as f:
<2> new_opt_state = from_bytes(self.opt_state, f.read())
<3>
<4> # restore steps
<5> with (Path(artifact_dir) / "training_state.json").open("r") as f:
<6> training_state = json.load(f)
<7> new_step = training_state["step"]
<8>
<9> # replace state
<10> return self.replace(step=new_step, opt_state=new_opt_state)
<11>
|
===========unchanged ref 0===========
at: datasets.arrow_dataset
Dataset(arrow_table: Table, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, indices_table: Optional[Table]=None, fingerprint: Optional[str]=None)
at: dev.seq2seq.run_seq2seq_flax.TrainState.restore_state
new_opt_state = from_bytes(self.opt_state, f.read())
training_state = json.load(f)
===========changed ref 0===========
# module: dev.seq2seq.run_seq2seq_flax
class TrainState(train_state.TrainState):
dropout_rng: jnp.ndarray = None
+ epoch: int = 0
+ train_time: float = 0.0 # total time the model trained
+ train_samples: int = 0 # number of samples seen
===========changed ref 1===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
text_column: Optional[str] = field(
default="caption",
metadata={
"help": "The name of the column in the datasets containing the full texts (for summarization)."
},
)
encoding_column: Optional[str] = field(
default="encoding",
metadata={
"help": "The name of the column in the datasets containing the image encodings."
},
)
dataset_repo_or_path: Optional[str] = field(
default=None,
metadata={"help": "The dataset repository containing encoded files."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."
},
)
streaming: bool = field(
default=False,
metadata={"help": "Whether to stream the dataset."},
- )
- len_train: Optional[int] = field(
- default=None,
- metadata={"help": "Length of training dataset, required for streaming"},
- )
- len_eval: Optional[int] = field(
- default=None,
- metadata={"help": "Length of validation dataset, required for streaming"},
)
max_source_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
use_decay: bool = field(
default=False,
metadata={"help": "Whether to use decay in the learning rate scheduler."},
</s>
===========changed ref 2===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
# offset: 1
<s> bool = field(
default=False,
metadata={"help": "Whether to use decay in the learning rate scheduler."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={
"help": "The number of processes to use for the preprocessing. Not used in streaming mode."
},
)
overwrite_cache: bool = field(
default=False,
metadata={
"help": "Overwrite the cached training and evaluation sets. Not used in streaming mode."
},
)
log_interval: Optional[int] = field(
default=40,
metadata={"help": "Log frequency for metrics"},
)
log_model: bool = field(
default=False,
metadata={"help": "Log frequency for model"},
)
- save_model_steps: Optional[int] = field(
- default=5000,
- metadata={"help": "For saving/logging the model more frequently"},
- )
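restore_state implies a matching save routine; a hedged sketch of what that side presumably looks like (not shown in this record; `state` and `artifact_dir` are assumed names):

import json
from pathlib import Path
from flax.serialization import to_bytes

def save_state(state, artifact_dir):
    with (Path(artifact_dir) / "opt_state.msgpack").open("wb") as f:
        f.write(to_bytes(state.opt_state))
    with (Path(artifact_dir) / "training_state.json").open("w") as f:
        json.dump(
            {"step": int(state.step),
             "train_time": float(state.train_time),
             "train_samples": int(state.train_samples)},
            f,
        )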
|
dev.seq2seq.run_seq2seq_flax/create_learning_rate_fn
|
Modified
|
borisdayma~dalle-mini
|
6523a6d550a36f8e5fc3312bae486c24d2eb2099
|
feat: add metrics + cleanup
|
<1>:<add> if use_decay:
<add> assert (
<add> num_train_steps is not None
<add> ), "Learning rate with decay requires number of training steps"
<del> steps_per_epoch = train_ds_size // train_batch_size
<2>:<del> num_train_steps = steps_per_epoch * num_train_epochs
|
<s>.run_seq2seq_flax
def create_learning_rate_fn(
- train_ds_size: int,
- train_batch_size: int,
- num_train_epochs: int,
num_warmup_steps: int,
learning_rate: float,
use_decay: bool,
+ num_train_steps: int = None, # used only with `use_decay`, typically train_size // batch_size * num_epochs
) -> Callable[[int], jnp.array]:
<0> """Returns a linear warmup, linear_decay learning rate function."""
<1> steps_per_epoch = train_ds_size // train_batch_size
<2> num_train_steps = steps_per_epoch * num_train_epochs
<3> warmup_fn = optax.linear_schedule(
<4> init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps
<5> )
<6> if not use_decay:
<7> return warmup_fn
<8> decay_fn = optax.linear_schedule(
<9> init_value=learning_rate,
<10> end_value=0,
<11> transition_steps=num_train_steps - num_warmup_steps,
<12> )
<13> schedule_fn = optax.join_schedules(
<14> schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]
<15> )
<16> return schedule_fn
<17>
|
===========unchanged ref 0===========
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
===========changed ref 0===========
# module: dev.seq2seq.run_seq2seq_flax
class TrainState(train_state.TrainState):
dropout_rng: jnp.ndarray = None
+ epoch: int = 0
+ train_time: float = 0.0 # total time the model trained
+ train_samples: int = 0 # number of samples seen
===========changed ref 1===========
# module: dev.seq2seq.run_seq2seq_flax
class TrainState(train_state.TrainState):
def restore_state(self, artifact_dir):
# restore optimizer state
with (Path(artifact_dir) / "opt_state.msgpack").open("rb") as f:
new_opt_state = from_bytes(self.opt_state, f.read())
+ # restore other parameters
- # restore steps
with (Path(artifact_dir) / "training_state.json").open("r") as f:
training_state = json.load(f)
- new_step = training_state["step"]
# replace state
+ return self.replace(
+ opt_state=new_opt_state,
+ step=training_state["step"],
+ train_time=training_state["train_time"],
+ train_samples=training_state["train_samples"],
+ )
- return self.replace(step=new_step, opt_state=new_opt_state)
===========changed ref 2===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
text_column: Optional[str] = field(
default="caption",
metadata={
"help": "The name of the column in the datasets containing the full texts (for summarization)."
},
)
encoding_column: Optional[str] = field(
default="encoding",
metadata={
"help": "The name of the column in the datasets containing the image encodings."
},
)
dataset_repo_or_path: Optional[str] = field(
default=None,
metadata={"help": "The dataset repository containing encoded files."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."
},
)
streaming: bool = field(
default=False,
metadata={"help": "Whether to stream the dataset."},
- )
- len_train: Optional[int] = field(
- default=None,
- metadata={"help": "Length of training dataset, required for streaming"},
- )
- len_eval: Optional[int] = field(
- default=None,
- metadata={"help": "Length of validation dataset, required for streaming"},
)
max_source_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
use_decay: bool = field(
default=False,
metadata={"help": "Whether to use decay in the learning rate scheduler."},
</s>
===========changed ref 3===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
# offset: 1
<s> bool = field(
default=False,
metadata={"help": "Whether to use decay in the learning rate scheduler."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={
"help": "The number of processes to use for the preprocessing. Not used in streaming mode."
},
)
overwrite_cache: bool = field(
default=False,
metadata={
"help": "Overwrite the cached training and evaluation sets. Not used in streaming mode."
},
)
log_interval: Optional[int] = field(
default=40,
metadata={"help": "Log frequency for metrics"},
)
log_model: bool = field(
default=False,
metadata={"help": "Log frequency for model"},
)
- save_model_steps: Optional[int] = field(
- default=5000,
- metadata={"help": "For saving/logging the model more frequently"},
- )
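Hedged usage sketch of the reworked signature; the num_train_steps expression mirrors the comment in the signature (numbers are illustrative):

train_size, batch_size, num_epochs = 1_000_000, 256, 3
lr_fn = create_learning_rate_fn(
    num_warmup_steps=1000,
    learning_rate=3e-4,
    use_decay=True,
    num_train_steps=train_size // batch_size * num_epochs,
)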
|
dev.seq2seq.run_seq2seq_flax/data_loader
|
Modified
|
borisdayma~dalle-mini
|
eac6890e735f37ab7af407c4aaef9d35a7f9918d
|
feat: use_auth_token + seed for dataset and model
|
<6>:<add> if rng is not None:
<del> if shuffle:
|
# module: dev.seq2seq.run_seq2seq_flax
def data_loader(
+ dataset: Dataset,
+ batch_size: int,
+ rng: jax.random.PRNGKey = None,
- rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False
):
<0> """
<1> Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
<2> Shuffle batches if `shuffle` is `True`.
<3> """
<4> steps_per_epoch = len(dataset) // batch_size
<5>
<6> if shuffle:
<7> batch_idx = jax.random.permutation(rng, len(dataset))
<8> else:
<9> batch_idx = jnp.arange(len(dataset))
<10>
<11> batch_idx = batch_idx[: steps_per_epoch * batch_size] # Skip incomplete batch.
<12> batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
<13>
<14> for idx in batch_idx:
<15> batch = dataset[idx]
<16> batch = {k: jnp.array(v) for k, v in batch.items()}
<17> batch = shard(batch)
<18> yield batch
<19>
|
===========unchanged ref 0===========
at: datasets.arrow_dataset
Dataset(arrow_table: Table, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, indices_table: Optional[Table]=None, fingerprint: Optional[str]=None)
at: io.TextIOWrapper
read(self, size: Optional[int]=..., /) -> str
at: json
load(fp: SupportsRead[Union[str, bytes]], *, cls: Optional[Type[JSONDecoder]]=..., object_hook: Optional[Callable[[Dict[Any, Any]], Any]]=..., parse_float: Optional[Callable[[str], Any]]=..., parse_int: Optional[Callable[[str], Any]]=..., parse_constant: Optional[Callable[[str], Any]]=..., object_pairs_hook: Optional[Callable[[List[Tuple[Any, Any]]], Any]]=..., **kwds: Any) -> Any
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
===========unchanged ref 1===========
open(mode: OpenBinaryMode, buffering: Literal[0], encoding: None=..., errors: None=..., newline: None=...) -> FileIO
open(mode: OpenBinaryModeReading, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedReader
open(mode: OpenBinaryModeUpdating, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedRandom
open(mode: OpenBinaryMode, buffering: int, encoding: None=..., errors: None=..., newline: None=...) -> BinaryIO
open(mode: str, buffering: int=..., encoding: Optional[str]=..., errors: Optional[str]=..., newline: Optional[str]=...) -> IO[Any]
open(mode: OpenBinaryModeWriting, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedWriter
open(mode: OpenTextMode=..., buffering: int=..., encoding: Optional[str]=..., errors: Optional[str]=..., newline: Optional[str]=...) -> TextIOWrapper
===========changed ref 0===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
text_column: Optional[str] = field(
default="caption",
metadata={
"help": "The name of the column in the datasets containing the full texts (for summarization)."
},
)
encoding_column: Optional[str] = field(
default="encoding",
metadata={
"help": "The name of the column in the datasets containing the image encodings."
},
)
dataset_repo_or_path: Optional[str] = field(
default=None,
metadata={"help": "The dataset repository containing encoded files."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."
},
)
streaming: bool = field(
default=False,
metadata={"help": "Whether to stream the dataset."},
+ )
+ use_auth_token: bool = field(
+ default=False,
+ metadata={
+ "help": "Whether to use the authentication token for private datasets."
+ },
)
max_source_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] =</s>
===========changed ref 1===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class DataTrainingArguments:
# offset: 1
<s> examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={
"help": "The number of processes to use for the preprocessing. Not used in streaming mode."
},
)
overwrite_cache: bool = field(
default=False,
metadata={
"help": "Overwrite the cached training and evaluation sets. Not used in streaming mode."
},
)
===========changed ref 2===========
# module: dev.seq2seq.run_seq2seq_flax
@dataclass
class TrainingArguments:
"""
Arguments pertaining to training parameters.
"""
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the dev set."}
)
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
)
per_device_eval_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing a backward/update pass."
},
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
adafactor: bool = field(
default=False,
metadata={"help": "Whether or not to replace AdamW by Adafactor."},
)
weight_decay: float = field(
default=None, metadata={"help": "Weight decay if we apply some."}
)
adam_beta1: float = field(
default=0.9, metadata={"help": "Beta1 for AdamW optimizer"}
)
adam_beta2: float = field</s>
|
dalle_mini.data/Dataset.preprocess
|
Modified
|
borisdayma~dalle-mini
|
0fe3e72e6aa110c878f0d91497858715b46d781f
|
fix(data): minor bugs
|
<13>:<add> partial_normalize_function = partial(
<add> normalize_function,
<add> text_column=self.text_column,
<add> text_normalizer=text_normalizer,
<add> )
<19>:<add> getattr(self, ds).map(partial_normalize_function)
<del> getattr(self, ds).map(
<20>:<del> normalize_text,
<21>:<del> fn_kwargs={
<22>:<del> "text_column": self.text_column,
<23>:<del> "text_normalizer": text_normalizer,
<24>:<del> },
<25>:<del> )
<28>:<add> partial_normalize_function,
<del> normalize_text,
<29>:<del> fn_kwargs={
<30>:<del> "text_column": self.text_column,
<31>:<del> "text_normalizer": text_normalizer,
<32>:<del> },
<41>:<add> partial_preprocess_function = partial(
<add> preprocess_function,
<add> tokenizer=tokenizer,
<add> text_column=self.text_column,
<add> encoding_column=self.encoding_column,
<add> max_source_length=self.max_source_length,
<add> decoder_start_token_id=decoder_start_token_id,
<add> )
<48>:<add> partial_preprocess_function,
<del> preprocess_function,
|
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, decoder_start_token_id, normalize_text):
<0> if self.streaming:
<1> # we need to shuffle early in streaming mode
<2> if hasattr(self, "train_dataset"):
<3> self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
<4> else:
<5> # prepare rng for later shuffling
<6> if self.seed_dataset is None:
<7> self.seed_dataset = np.random.get_state()[1][0]
<8> self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
<9>
<10> # normalize text
<11> if normalize_text:
<12> text_normalizer = TextNormalizer()
<13> for ds in ["train_dataset", "eval_dataset"]:
<14> if hasattr(self, ds):
<15> setattr(
<16> self,
<17> ds,
<18> (
<19> getattr(self, ds).map(
<20> normalize_text,
<21> fn_kwargs={
<22> "text_column": self.text_column,
<23> "text_normalizer": text_normalizer,
<24> },
<25> )
<26> if self.streaming
<27> else getattr(self, ds).map(
<28> normalize_text,
<29> fn_kwargs={
<30> "text_column": self.text_column,
<31> "text_normalizer": text_normalizer,
<32> },
<33> num_proc=self.preprocessing_num_workers,
<34> load_from_cache_file=not self.overwrite_cache,
<35> desc="Normalizing datasets",
<36> )
<37> ),
<38> )
<39>
<40> # preprocess
<41> for ds in ["train_dataset", "eval_dataset"]:
<42> if hasattr(self, ds):
<43> setattr(
<44> self,
<45> ds,
<46> (
<47> getattr(self, ds).map(
<48> preprocess_function,
<49> batched=</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, decoder_start_token_id, normalize_text):
# offset: 1
fn_kwargs={
"tokenizer": tokenizer,
"text_column": self.text_column,
"encoding_column": self.encoding_column,
"max_source_length": self.max_source_length,
"decoder_start_token_id": decoder_start_token_id,
},
)
if self.streaming
else getattr(self, ds).map(
preprocess_function,
batched=True,
fn_kwargs={
"tokenizer": tokenizer,
"text_column": self.text_column,
"encoding_column": self.encoding_column,
"max_source_length": self.max_source_length,
"decoder_start_token_id": decoder_start_token_id,
},
remove_columns=getattr(ds, "column_names"),
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Preprocessing datasets",
)
),
)
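
The refactor above binds the static arguments once with `functools.partial` so that both the streaming and non-streaming `map` calls receive the same one-argument callable. A minimal sketch of the pattern; the body of `normalize_function` is an assumption (only its signature appears in this file), and `str.lower` stands in for the real `TextNormalizer`:

from functools import partial

def normalize_function(example, text_column, text_normalizer):
    # Assumed body: run the normalizer over the text column in place.
    example[text_column] = text_normalizer(example[text_column])
    return example

partial_normalize_function = partial(
    normalize_function, text_column="caption", text_normalizer=str.lower
)
print(partial_normalize_function({"caption": "A Red Bus"}))  # {'caption': 'a red bus'}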
===========unchanged ref 0===========
at: dalle_mini.data
Dataset(dataset_repo_or_path: str, train_file: str=None, validation_file: str=None, dataset_type: str="dataset", streaming: bool=True, use_auth_token: bool=False, text_column: str="caption", encoding_column: str="encoding", max_source_length: int=128, max_train_samples: int=None, max_eval_samples: int=None, preprocessing_num_workers: int=None, overwrite_cache: bool=False, do_train: bool=False, do_eval: bool=True, seed_dataset: int=None, train_dataset: Dataset=field(init=False), eval_dataset: Dataset=field(init=False), rng_dataset: jnp.ndarray=field(init=False))
normalize_function(example, text_column, text_normalizer)
preprocess_function(examples, tokenizer, text_column, encoding_column, max_source_length, decoder_start_token_id)
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
dataset_type: str = "dataset"
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_source_length: int = 128
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
===========unchanged ref 1===========
at: dalle_mini.data.Dataset.__post_init__
self.train_dataset = dataset["train"]
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
at: dalle_mini.data.Dataset.dataloader
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
at: dalle_mini.text
TextNormalizer()
at: datasets.arrow_dataset.Dataset
map(function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=None, cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, suffix_template: str="_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str]=None, desc: Optional[str]=None) -> "Dataset"
wrapper(*, generator: Optional[np.random.Generator]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
===========unchanged ref 2===========
at: datasets.dataset_dict.DatasetDict
map(function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, desc: Optional[str]=None) -> "DatasetDict"
shuffle(seeds: Optional[Union[int, Dict[str, Optional[int]]]]=None, seed: Optional[int]=None, generators: Optional[Dict[str, np.random.Generator]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, indices_cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000) -> "DatasetDict"
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: numpy.random.mtrand
get_state = _rand.get_state
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
dataset_type: str = "dataset"
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_source_length: int = 128
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
+ train_dataset: Dataset = field(init=False)
- train_dataset = field(init=False)
+ eval_dataset: Dataset = field(init=False)
- eval_dataset = field(init=False)
+ rng_dataset: jnp.ndarray = field(init=False)
- rng_dataset = field(init=False)
|
dalle_mini.model/CustomFlaxBartForConditionalGenerationModule.setup
|
Modified
|
borisdayma~dalle-mini
|
92ccf4c8bc5442d6dfd57a3ebb088c6d84ba682f
|
feat(model): set default config for legacy models
|
<0>:<add> # set default config
<add> self.config.normalize_text = getattr(self.config, "normalize_text", False)
<add> self.config.image_length = getattr(self.config, "image_length", 256)
<add> self.config.image_vocab_size = getattr(self.config, "image_vocab_size", 16384)
<add>
|
# module: dalle_mini.model
class CustomFlaxBartForConditionalGenerationModule(
FlaxBartForConditionalGenerationModule
):
def setup(self):
<0> self.model = CustomFlaxBartModule(config=self.config, dtype=self.dtype)
<1> self.lm_head = nn.Dense(
<2> self.config.image_vocab_size + 1, # encoded image token space + 1 for bos
<3> use_bias=False,
<4> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<5> )
<6> self.final_logits_bias = self.param(
<7> "final_logits_bias", self.bias_init, (1, self.config.image_vocab_size + 1)
<8> )
<9>
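
The `getattr(self.config, name, default)` calls added by this commit backfill fields that legacy checkpoints were saved without. A tiny sketch of the idiom, using the same defaults as the diff (`LegacyConfig` is a hypothetical stand-in for an old config object):

class LegacyConfig:
    pass  # checkpoint saved before these fields existed

config = LegacyConfig()
config.normalize_text = getattr(config, "normalize_text", False)
config.image_length = getattr(config, "image_length", 256)
config.image_vocab_size = getattr(config, "image_vocab_size", 16384)
print(config.image_length)  # 256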
|
===========unchanged ref 0===========
at: transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule
config: BartConfig
dtype: jnp.dtype = jnp.float32
bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
setup(self)
|
dalle_mini.modeling_bart_flax/FlaxBartAttention.setup
|
Modified
|
borisdayma~dalle-mini
|
a11892f9f4570aed1ec5687a414ac02fb77a2cc6
|
fix(model): use correct params
|
<20>:<add> jnp.ones((1, embed_dim), dtype="bool"), dtype="bool"
<del> jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
|
# module: dalle_mini.modeling_bart_flax
class FlaxBartAttention(nn.Module):
def setup(self) -> None:
<0> self.head_dim = self.embed_dim // self.num_heads
<1> assert (
<2> self.head_dim * self.num_heads == self.embed_dim
<3> ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
<4>
<5> dense = partial(
<6> nn.Dense,
<7> self.embed_dim,
<8> use_bias=False,
<9> dtype=self.dtype,
<10> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<11> )
<12>
<13> self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
<14> self.out_proj = dense()
<15>
<16> self.dropout_layer = nn.Dropout(rate=self.dropout)
<17>
<18> if self.causal:
<19> self.causal_mask = make_causal_mask(
<20> jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
<21> )
<22>
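
For reference, `flax.linen.make_causal_mask` builds a lower-triangular attention mask from a dummy input of shape `(batch, seq_len)`; a minimal sketch with a short hypothetical sequence length of 4:

import jax.numpy as jnp
from flax.linen import make_causal_mask

mask = make_causal_mask(jnp.ones((1, 4), dtype="bool"), dtype="bool")
print(mask.shape)   # (1, 1, 4, 4)
print(mask[0, 0])   # lower-triangular: position i may attend to positions <= i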
|
===========unchanged ref 0===========
at: dalle_mini.modeling_bart_flax.FlaxBartAttention
config: BartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
===========changed ref 0===========
# module: dalle_mini.configuration_bart
-
-
===========changed ref 1===========
# module: dalle_mini.configuration_bart
logger = logging.get_logger(__name__)
- BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
- "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
- # See all BART models at https://huggingface.co/models?filter=bart
- }
===========changed ref 2===========
<s>checkpointing=False,
- use_cache=True,
- num_labels=3,
- pad_token_id=1,
- bos_token_id=0,
- eos_token_id=2,
- is_encoder_decoder=True,
- decoder_start_token_id=16384,
- forced_eos_token_id=2,
- tie_word_embeddings=False, # don't tie for scaling reasons
- **kwargs,
- ):
- self.vocab_size = vocab_size
- self.decoder_vocab_size = decoder_vocab_size
- self.max_position_embeddings = max_position_embeddings
- self.decoder_max_position_embeddings = decoder_max_position_embeddings
- self.d_model = d_model
- self.encoder_ffn_dim = encoder_ffn_dim
- self.encoder_layers = encoder_layers
- self.encoder_attention_heads = encoder_attention_heads
- self.decoder_ffn_dim = decoder_ffn_dim
- self.decoder_layers = decoder_layers
- self.decoder_attention_heads = decoder_attention_heads
- self.dropout = dropout
- self.attention_dropout = attention_dropout
- self.activation_dropout = activation_dropout
- self.activation_function = activation_function
- self.init_std = init_std
- self.encoder_layerdrop = encoder_layerdrop
- self.decoder_layerdrop = decoder_layerdrop
- self.classifier_dropout = classifier_dropout
- self.use_cache = use_cache
- self.num_hidden_layers = encoder_layers
- self.gradient_checkpointing = gradient_checkpointing
- self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
-
- super().__init__(
- num_labels=num_labels,
- pad_token_id=pad_token_id,
- bos_token_id=bos_</s>
===========changed ref 3===========
<s>
- use_cache=True,
- num_labels=3,
- pad_token_id=1,
- bos_token_id=0,
- eos_token_id=2,
- is_encoder_decoder=True,
- decoder_start_token_id=16384,
- forced_eos_token_id=2,
- tie_word_embeddings=False, # don't tie for scaling reasons
- **kwargs,
- ):
# offset: 1
<s>_labels,
- pad_token_id=pad_token_id,
- bos_token_id=bos_token_id,
- eos_token_id=eos_token_id,
- is_encoder_decoder=is_encoder_decoder,
- decoder_start_token_id=decoder_start_token_id,
- forced_eos_token_id=forced_eos_token_id,
- tie_word_embeddings=tie_word_embeddings,
- **kwargs,
- )
-
- # ensure backward compatibility for BART CNN models
- if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
- self.forced_bos_token_id = self.bos_token_id
- warnings.warn(
- f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
- "The config can simply be saved and uploaded again to be fixed."
- )
-
===========changed ref 4===========
<s>0.0,
+ init_std=0.02,
+ classifier_dropout=0.0,
+ scale_embedding=False,
+ gradient_checkpointing=False,
+ use_cache=True,
+ num_labels=3,
+ is_encoder_decoder=True,
+ forced_eos_token_id=None,
+ tie_word_embeddings=False, # don't tie for scaling reasons and due to different modalities and sizes
+ **kwargs,
+ ):
+ self.normalize_text = normalize_text
+ self.encoder_vocab_size = encoder_vocab_size
+ self.decoder_vocab_size = image_vocab_size
+ self.image_length = image_length
+ self.max_text_length = max_text_length
+ self.d_model = d_model
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.classifier_dropout = classifier_dropout
+ self.use_cache = use_cache
+ self.num_hidden_layers = encoder_layers
+ self.gradient_checkpointing = gradient_checkpointing
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+ self.decoder_start_token_id = image_vocab_size, # BOS appended to vocab
+ self.min_length = image_length + 1
+ self.</s>
|
dalle_mini.modeling_bart_flax/FlaxBartEncoder.setup
|
Modified
|
borisdayma~dalle-mini
|
a11892f9f4570aed1ec5687a414ac02fb77a2cc6
|
fix(model): use correct params
|
<4>:<del> self.max_source_positions = self.config.max_position_embeddings
<8>:<add> self.config.encoder_vocab_size,
<del> self.config.vocab_size,
<17>:<add> self.config.max_text_length + self.offset,
<del> self.config.max_position_embeddings + self.offset,
|
# module: dalle_mini.modeling_bart_flax
class FlaxBartEncoder(nn.Module):
def setup(self):
<0> self.dropout_layer = nn.Dropout(rate=self.config.dropout)
<1>
<2> embed_dim = self.config.d_model
<3> self.padding_idx = self.config.pad_token_id
<4> self.max_source_positions = self.config.max_position_embeddings
<5> self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
<6>
<7> self.embed_tokens = nn.Embed(
<8> self.config.vocab_size,
<9> embed_dim,
<10> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<11> )
<12>
<13> # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
<14> # and adjust num_embeddings appropriately. Other models don't have this hack
<15> self.offset = 0
<16> self.embed_positions = nn.Embed(
<17> self.config.max_position_embeddings + self.offset,
<18> embed_dim,
<19> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<20> )
<21> self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
<22> self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype)
<23>
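
The `embed_scale` computed above multiplies token embeddings by `sqrt(d_model)` when `scale_embedding` is set, as in the original Transformer; a one-line numeric check with a hypothetical `d_model`:

import math

d_model, scale_embedding = 1024, True
embed_scale = math.sqrt(d_model) if scale_embedding else 1.0
print(embed_scale)  # 32.0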
|
===========unchanged ref 0===========
at: dalle_mini.modeling_bart_flax.FlaxBartEncoder
config: BartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: math
sqrt(x: SupportsFloat, /) -> float
===========changed ref 0===========
# module: dalle_mini.modeling_bart_flax
class FlaxBartAttention(nn.Module):
def setup(self) -> None:
self.head_dim = self.embed_dim // self.num_heads
assert (
self.head_dim * self.num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.dropout_layer = nn.Dropout(rate=self.dropout)
if self.causal:
self.causal_mask = make_causal_mask(
+ jnp.ones((1, embed_dim), dtype="bool"), dtype="bool"
- jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
)
===========changed ref 1===========
# module: dalle_mini.configuration_bart
-
-
===========changed ref 2===========
# module: dalle_mini.configuration_bart
logger = logging.get_logger(__name__)
- BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
- "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
- # See all BART models at https://huggingface.co/models?filter=bart
- }
===========changed ref 3===========
<s>checkpointing=False,
- use_cache=True,
- num_labels=3,
- pad_token_id=1,
- bos_token_id=0,
- eos_token_id=2,
- is_encoder_decoder=True,
- decoder_start_token_id=16384,
- forced_eos_token_id=2,
- tie_word_embeddings=False, # don't tie for scaling reasons
- **kwargs,
- ):
- self.vocab_size = vocab_size
- self.decoder_vocab_size = decoder_vocab_size
- self.max_position_embeddings = max_position_embeddings
- self.decoder_max_position_embeddings = decoder_max_position_embeddings
- self.d_model = d_model
- self.encoder_ffn_dim = encoder_ffn_dim
- self.encoder_layers = encoder_layers
- self.encoder_attention_heads = encoder_attention_heads
- self.decoder_ffn_dim = decoder_ffn_dim
- self.decoder_layers = decoder_layers
- self.decoder_attention_heads = decoder_attention_heads
- self.dropout = dropout
- self.attention_dropout = attention_dropout
- self.activation_dropout = activation_dropout
- self.activation_function = activation_function
- self.init_std = init_std
- self.encoder_layerdrop = encoder_layerdrop
- self.decoder_layerdrop = decoder_layerdrop
- self.classifier_dropout = classifier_dropout
- self.use_cache = use_cache
- self.num_hidden_layers = encoder_layers
- self.gradient_checkpointing = gradient_checkpointing
- self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
-
- super().__init__(
- num_labels=num_labels,
- pad_token_id=pad_token_id,
- bos_token_id=bos_</s>
===========changed ref 4===========
<s>
- use_cache=True,
- num_labels=3,
- pad_token_id=1,
- bos_token_id=0,
- eos_token_id=2,
- is_encoder_decoder=True,
- decoder_start_token_id=16384,
- forced_eos_token_id=2,
- tie_word_embeddings=False, # don't tie for scaling reasons
- **kwargs,
- ):
# offset: 1
<s>_labels,
- pad_token_id=pad_token_id,
- bos_token_id=bos_token_id,
- eos_token_id=eos_token_id,
- is_encoder_decoder=is_encoder_decoder,
- decoder_start_token_id=decoder_start_token_id,
- forced_eos_token_id=forced_eos_token_id,
- tie_word_embeddings=tie_word_embeddings,
- **kwargs,
- )
-
- # ensure backward compatibility for BART CNN models
- if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
- self.forced_bos_token_id = self.bos_token_id
- warnings.warn(
- f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
- "The config can simply be saved and uploaded again to be fixed."
- )
-
===========changed ref 5===========
<s>0.0,
+ init_std=0.02,
+ classifier_dropout=0.0,
+ scale_embedding=False,
+ gradient_checkpointing=False,
+ use_cache=True,
+ num_labels=3,
+ is_encoder_decoder=True,
+ forced_eos_token_id=None,
+ tie_word_embeddings=False, # don't tie for scaling reasons and due to different modalities and sizes
+ **kwargs,
+ ):
+ self.normalize_text = normalize_text
+ self.encoder_vocab_size = encoder_vocab_size
+ self.decoder_vocab_size = image_vocab_size
+ self.image_length = image_length
+ self.max_text_length = max_text_length
+ self.d_model = d_model
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.classifier_dropout = classifier_dropout
+ self.use_cache = use_cache
+ self.num_hidden_layers = encoder_layers
+ self.gradient_checkpointing = gradient_checkpointing
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+ self.decoder_start_token_id = image_vocab_size, # BOS appended to vocab
+ self.min_length = image_length + 1
+ self.</s>
|
dalle_mini.modeling_bart_flax/FlaxBartDecoder.setup
|
Modified
|
borisdayma~dalle-mini
|
a11892f9f4570aed1ec5687a414ac02fb77a2cc6
|
fix(model): use correct params
|
<4>:<del> self.max_target_positions = self.config.max_position_embeddings
<8>:<add> self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
<del> self.config.decoder_vocab_size,
<17>:<add> self.config.image_length + 1 + self.offset, # image length + 1 for BOS
<del> self.config.decoder_max_position_embeddings + self.offset,
|
# module: dalle_mini.modeling_bart_flax
class FlaxBartDecoder(nn.Module):
def setup(self):
<0> self.dropout_layer = nn.Dropout(rate=self.config.dropout)
<1>
<2> embed_dim = self.config.d_model
<3> self.padding_idx = self.config.pad_token_id
<4> self.max_target_positions = self.config.max_position_embeddings
<5> self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
<6>
<7> self.embed_tokens = nn.Embed(
<8> self.config.decoder_vocab_size,
<9> embed_dim,
<10> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<11> )
<12>
<13> # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
<14> # and adjust num_embeddings appropriately. Other models don't have this hack
<15> self.offset = 0
<16> self.embed_positions = nn.Embed(
<17> self.config.decoder_max_position_embeddings + self.offset,
<18> embed_dim,
<19> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<20> )
<21>
<22> self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
<23> self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype)
<24>
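
The `+ 1` terms in this diff make room for the BOS token: the decoder embeds `image_vocab_size + 1` ids and `image_length + 1` positions. With the defaults that appear elsewhere in this commit (`image_vocab_size=16384`, `image_length=256`) the resulting table sizes are:

image_vocab_size, image_length = 16384, 256  # defaults seen in this diff
print(image_vocab_size + 1)  # 16385 token embeddings: image codes plus BOS
print(image_length + 1)      # 257 position embeddings: BOS plus 256 image tokens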
|
===========unchanged ref 0===========
at: dalle_mini.modeling_bart_flax.FlaxBartDecoder
config: BartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: math
sqrt(x: SupportsFloat, /) -> float
===========changed ref 0===========
# module: dalle_mini.modeling_bart_flax
class FlaxBartEncoder(nn.Module):
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
- self.max_source_positions = self.config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
self.embed_tokens = nn.Embed(
+ self.config.encoder_vocab_size,
- self.config.vocab_size,
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 0
self.embed_positions = nn.Embed(
+ self.config.max_text_length + self.offset,
- self.config.max_position_embeddings + self.offset,
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype)
===========changed ref 1===========
# module: dalle_mini.modeling_bart_flax
class FlaxBartAttention(nn.Module):
def setup(self) -> None:
self.head_dim = self.embed_dim // self.num_heads
assert (
self.head_dim * self.num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.dropout_layer = nn.Dropout(rate=self.dropout)
if self.causal:
self.causal_mask = make_causal_mask(
+ jnp.ones((1, embed_dim), dtype="bool"), dtype="bool"
- jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
)
===========changed ref 2===========
# module: dalle_mini.configuration_bart
-
-
===========changed ref 3===========
# module: dalle_mini.configuration_bart
logger = logging.get_logger(__name__)
- BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
- "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
- # See all BART models at https://huggingface.co/models?filter=bart
- }
===========changed ref 4===========
<s>checkpointing=False,
- use_cache=True,
- num_labels=3,
- pad_token_id=1,
- bos_token_id=0,
- eos_token_id=2,
- is_encoder_decoder=True,
- decoder_start_token_id=16384,
- forced_eos_token_id=2,
- tie_word_embeddings=False, # don't tie for scaling reasons
- **kwargs,
- ):
- self.vocab_size = vocab_size
- self.decoder_vocab_size = decoder_vocab_size
- self.max_position_embeddings = max_position_embeddings
- self.decoder_max_position_embeddings = decoder_max_position_embeddings
- self.d_model = d_model
- self.encoder_ffn_dim = encoder_ffn_dim
- self.encoder_layers = encoder_layers
- self.encoder_attention_heads = encoder_attention_heads
- self.decoder_ffn_dim = decoder_ffn_dim
- self.decoder_layers = decoder_layers
- self.decoder_attention_heads = decoder_attention_heads
- self.dropout = dropout
- self.attention_dropout = attention_dropout
- self.activation_dropout = activation_dropout
- self.activation_function = activation_function
- self.init_std = init_std
- self.encoder_layerdrop = encoder_layerdrop
- self.decoder_layerdrop = decoder_layerdrop
- self.classifier_dropout = classifier_dropout
- self.use_cache = use_cache
- self.num_hidden_layers = encoder_layers
- self.gradient_checkpointing = gradient_checkpointing
- self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
-
- super().__init__(
- num_labels=num_labels,
- pad_token_id=pad_token_id,
- bos_token_id=bos_</s>
===========changed ref 5===========
<s>
- use_cache=True,
- num_labels=3,
- pad_token_id=1,
- bos_token_id=0,
- eos_token_id=2,
- is_encoder_decoder=True,
- decoder_start_token_id=16384,
- forced_eos_token_id=2,
- tie_word_embeddings=False, # don't tie for scaling reasons
- **kwargs,
- ):
# offset: 1
<s>_labels,
- pad_token_id=pad_token_id,
- bos_token_id=bos_token_id,
- eos_token_id=eos_token_id,
- is_encoder_decoder=is_encoder_decoder,
- decoder_start_token_id=decoder_start_token_id,
- forced_eos_token_id=forced_eos_token_id,
- tie_word_embeddings=tie_word_embeddings,
- **kwargs,
- )
-
- # ensure backward compatibility for BART CNN models
- if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
- self.forced_bos_token_id = self.bos_token_id
- warnings.warn(
- f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
- "The config can simply be saved and uploaded again to be fixed."
- )
-
|
dalle_mini.modeling_bart_flax/FlaxBartForConditionalGenerationModule.setup
|
Modified
|
borisdayma~dalle-mini
|
a11892f9f4570aed1ec5687a414ac02fb77a2cc6
|
fix(model): use correct params
|
<2>:<add> self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
<del> self.config.decoder_vocab_size,
<7>:<add> self.final_logits_bias = self.param(
<add> "final_logits_bias", self.bias_init, (1, self.config.image_vocab_size + 1)
<add> )
|
# module: dalle_mini.modeling_bart_flax
class FlaxBartForConditionalGenerationModule(nn.Module):
def setup(self):
<0> self.model = FlaxBartModule(config=self.config, dtype=self.dtype)
<1> self.lm_head = nn.Dense(
<2> self.config.decoder_vocab_size,
<3> use_bias=False,
<4> dtype=self.dtype,
<5> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<6> )
<7>
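
A minimal sketch of the head wiring above: an untied `Dense` with no bias, plus a separately declared `final_logits_bias` parameter added to the logits. This is a standalone module for illustration only; the vocab size matches the diff's `image_vocab_size + 1`, while the hidden size is hypothetical:

import jax
import jax.numpy as jnp
import flax.linen as nn

class LMHead(nn.Module):
    vocab: int = 16385  # image vocab + 1 for BOS

    @nn.compact
    def __call__(self, hidden_states):
        logits = nn.Dense(self.vocab, use_bias=False)(hidden_states)
        bias = self.param("final_logits_bias", nn.initializers.zeros, (1, self.vocab))
        return logits + bias  # bias kept as its own parameter, as in the setup above

params = LMHead().init(jax.random.PRNGKey(0), jnp.ones((1, 8, 64)))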
|
===========unchanged ref 0===========
at: dalle_mini.modeling_bart_flax.FlaxBartForConditionalGenerationModule
config: BartConfig
dtype: jnp.dtype = jnp.float32
bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
===========changed ref 0===========
# module: dalle_mini.modeling_bart_flax
class FlaxBartDecoder(nn.Module):
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
- self.max_target_positions = self.config.max_position_embeddings
self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
self.embed_tokens = nn.Embed(
+ self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
- self.config.decoder_vocab_size,
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 0
self.embed_positions = nn.Embed(
+ self.config.image_length + 1 + self.offset, # image length + 1 for BOS
- self.config.decoder_max_position_embeddings + self.offset,
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype)
===========changed ref 1===========
# module: dalle_mini.modeling_bart_flax
class FlaxBartEncoder(nn.Module):
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
- self.max_source_positions = self.config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
self.embed_tokens = nn.Embed(
+ self.config.encoder_vocab_size,
- self.config.vocab_size,
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 0
self.embed_positions = nn.Embed(
+ self.config.max_text_length + self.offset,
- self.config.max_position_embeddings + self.offset,
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype)
===========changed ref 2===========
# module: dalle_mini.modeling_bart_flax
class FlaxBartAttention(nn.Module):
def setup(self) -> None:
self.head_dim = self.embed_dim // self.num_heads
assert (
self.head_dim * self.num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.dropout_layer = nn.Dropout(rate=self.dropout)
if self.causal:
self.causal_mask = make_causal_mask(
+ jnp.ones((1, embed_dim), dtype="bool"), dtype="bool"
- jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
)
===========changed ref 3===========
# module: dalle_mini.configuration_bart
-
-
===========changed ref 4===========
# module: dalle_mini.configuration_bart
logger = logging.get_logger(__name__)
- BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
- "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
- # See all BART models at https://huggingface.co/models?filter=bart
- }
===========changed ref 5===========
<s>checkpointing=False,
- use_cache=True,
- num_labels=3,
- pad_token_id=1,
- bos_token_id=0,
- eos_token_id=2,
- is_encoder_decoder=True,
- decoder_start_token_id=16384,
- forced_eos_token_id=2,
- tie_word_embeddings=False, # don't tie for scaling reasons
- **kwargs,
- ):
- self.vocab_size = vocab_size
- self.decoder_vocab_size = decoder_vocab_size
- self.max_position_embeddings = max_position_embeddings
- self.decoder_max_position_embeddings = decoder_max_position_embeddings
- self.d_model = d_model
- self.encoder_ffn_dim = encoder_ffn_dim
- self.encoder_layers = encoder_layers
- self.encoder_attention_heads = encoder_attention_heads
- self.decoder_ffn_dim = decoder_ffn_dim
- self.decoder_layers = decoder_layers
- self.decoder_attention_heads = decoder_attention_heads
- self.dropout = dropout
- self.attention_dropout = attention_dropout
- self.activation_dropout = activation_dropout
- self.activation_function = activation_function
- self.init_std = init_std
- self.encoder_layerdrop = encoder_layerdrop
- self.decoder_layerdrop = decoder_layerdrop
- self.classifier_dropout = classifier_dropout
- self.use_cache = use_cache
- self.num_hidden_layers = encoder_layers
- self.gradient_checkpointing = gradient_checkpointing
- self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
-
- super().__init__(
- num_labels=num_labels,
- pad_token_id=pad_token_id,
- bos_token_id=bos_</s>
|
dalle_mini.data/Dataset.preprocess
|
Modified
|
borisdayma~dalle-mini
|
a96f4dc5fdb5464c707ef3613c8b1051d1ea73e6
|
fix: adjust training script + dataloader
|
<41>:<add> max_length=max_length,
<del> max_source_length=self.max_source_length,
|
# module: dalle_mini.data
@dataclass
class Dataset:
+ def preprocess(self, tokenizer, decoder_start_token_id, normalize_text, max_length):
- def preprocess(self, tokenizer, decoder_start_token_id, normalize_text):
<0> if self.streaming:
<1> # we need to shuffle early in streaming mode
<2> if hasattr(self, "train_dataset"):
<3> self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
<4> else:
<5> # prepare rng for later shuffling
<6> if self.seed_dataset is None:
<7> self.seed_dataset = np.random.get_state()[1][0]
<8> self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
<9>
<10> # normalize text
<11> if normalize_text:
<12> text_normalizer = TextNormalizer()
<13> partial_normalize_function = partial(
<14> normalize_function,
<15> text_column=self.text_column,
<16> text_normalizer=text_normalizer,
<17> )
<18> for ds in ["train_dataset", "eval_dataset"]:
<19> if hasattr(self, ds):
<20> setattr(
<21> self,
<22> ds,
<23> (
<24> getattr(self, ds).map(partial_normalize_function)
<25> if self.streaming
<26> else getattr(self, ds).map(
<27> partial_normalize_function,
<28> num_proc=self.preprocessing_num_workers,
<29> load_from_cache_file=not self.overwrite_cache,
<30> desc="Normalizing datasets",
<31> )
<32> ),
<33> )
<34>
<35> # preprocess
<36> partial_preprocess_function = partial(
<37> preprocess_function,
<38> tokenizer=tokenizer,
<39> text_column=self.text_column,
<40> encoding_column=self.encoding_column,
<41> max_source_length=self.max_source_length,
<42> decoder_start_token_id=</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
+ def preprocess(self, tokenizer, decoder_start_token_id, normalize_text, max_length):
- def preprocess(self, tokenizer, decoder_start_token_id, normalize_text):
# offset: 1
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(
partial_preprocess_function,
batched=True,
)
if self.streaming
else getattr(self, ds).map(
partial_preprocess_function,
batched=True,
remove_columns=getattr(ds, "column_names"),
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Preprocessing datasets",
)
),
)
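
In the non-streaming branch above, a dataset seed is derived from NumPy's global RNG state when none is given, then turned into a JAX key that `dataloader` later splits per epoch. A minimal sketch of that seeding chain:

import numpy as np
import jax

seed_dataset = None
if seed_dataset is None:
    # Borrow a word from NumPy's global Mersenne Twister state as the seed.
    seed_dataset = np.random.get_state()[1][0]
rng_dataset = jax.random.PRNGKey(seed_dataset)
rng_dataset, input_rng = jax.random.split(rng_dataset)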
===========unchanged ref 0===========
at: dalle_mini.data
normalize_function(example, text_column, text_normalizer)
preprocess_function(examples, tokenizer, text_column, encoding_column, max_length, decoder_start_token_id)
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
at: dalle_mini.data.Dataset.__post_init__
self.train_dataset = dataset["train"]
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
at: dalle_mini.data.Dataset.dataloader
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
at: dalle_mini.text
TextNormalizer()
===========unchanged ref 1===========
at: datasets.arrow_dataset.Dataset
map(function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=None, cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, suffix_template: str="_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str]=None, desc: Optional[str]=None) -> "Dataset"
wrapper(*, generator: Optional[np.random.Generator]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
===========unchanged ref 2===========
at: datasets.dataset_dict.DatasetDict
map(function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, desc: Optional[str]=None) -> "DatasetDict"
shuffle(seeds: Optional[Union[int, Dict[str, Optional[int]]]]=None, seed: Optional[int]=None, generators: Optional[Dict[str, np.random.Generator]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, indices_cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000) -> "DatasetDict"
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: numpy.random.mtrand
get_state = _rand.get_state
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
- dataset_type: str = "dataset"
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
- max_source_length: int = 128
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
===========changed ref 1===========
+ # module: dalle_mini.model.partitions
+
+
===========changed ref 2===========
+ # module: dalle_mini.model.modeling
+
+
===========changed ref 3===========
+ # module: dalle_mini.model.configuration
+
+
===========changed ref 4===========
- # module: dalle_mini.model
-
-
===========changed ref 5===========
+ # module: dalle_mini.model.modeling
+ class DalleBartModule(nn.Module):
+ def _get_decoder_module(self):
+ return self.decoder
+
===========changed ref 6===========
+ # module: dalle_mini.model.modeling
+ class DalleBartModule(nn.Module):
+ def _get_encoder_module(self):
+ return self.encoder
+
===========changed ref 7===========
+ # module: dalle_mini.model.modeling
+ class DalleBartForConditionalGenerationModule(nn.Module):
+ def _get_decoder_module(self):
+ return self.model.decoder
+
===========changed ref 8===========
+ # module: dalle_mini.model.modeling
+ class DalleBartForConditionalGenerationModule(nn.Module):
+ def _get_encoder_module(self):
+ return self.model.encoder
+
===========changed ref 9===========
+ # module: dalle_mini.model.modeling
+ logger = logging.get_logger(__name__)
+
===========changed ref 10===========
+ # module: dalle_mini.model.configuration
+ logger = logging.get_logger(__name__)
+
===========changed ref 11===========
- # module: dalle_mini.model
- class CustomFlaxBartForConditionalGeneration(FlaxBartForConditionalGeneration):
- module_class = CustomFlaxBartForConditionalGenerationModule
-
|
dalle_mini.data/preprocess_function
|
Modified
|
borisdayma~dalle-mini
|
a96f4dc5fdb5464c707ef3613c8b1051d1ea73e6
|
fix: adjust training script + dataloader
|
<4>:<add> max_length=max_length,
<del> max_length=max_source_length,
|
# module: dalle_mini.data
def preprocess_function(
examples,
tokenizer,
text_column,
encoding_column,
+ max_length,
- max_source_length,
decoder_start_token_id,
):
<0> inputs = examples[text_column]
<1> # Setting padding="max_length" as we need fixed length inputs for jitted functions
<2> model_inputs = tokenizer(
<3> inputs,
<4> max_length=max_source_length,
<5> padding="max_length",
<6> truncation=True,
<7> return_tensors="np",
<8> )
<9>
<10> # set up targets
<11> # Note: labels correspond to our target indices
<12> # decoder input ids are the same but shifted to the right with bos at the beginning (and without last token)
<13> labels = examples[encoding_column]
<14> labels = np.asarray(labels)
<15>
<16> # We need the labels, in addition to the decoder_input_ids, for the compute_loss function
<17> model_inputs["labels"] = labels
<18>
<19> # In our case, this prepends the bos token and removes the last one
<20> decoder_input_ids = shift_tokens_right(labels, decoder_start_token_id)
<21> model_inputs["decoder_input_ids"] = decoder_input_ids
<22>
<23> return model_inputs
<24>
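
The comments above describe the BOS shift; a worked example matching the `shift_tokens_right(input_ids, decoder_start_token_id)` signature referenced below. The body is reconstructed from the behavior described (prepend BOS, drop the last token) and omits any pad-token handling:

import numpy as np

def shift_tokens_right(input_ids: np.ndarray, decoder_start_token_id: int) -> np.ndarray:
    # Prepend BOS and drop the last token, as the comments above describe.
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return shifted

labels = np.array([[5, 6, 7, 8]])
print(shift_tokens_right(labels, decoder_start_token_id=16384))
# [[16384     5     6     7]]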
|
===========unchanged ref 0===========
at: dalle_mini.data
shift_tokens_right(input_ids: np.array, decoder_start_token_id: int)
at: numpy.core._multiarray_umath
asarray(a, dtype=None, order=None, *, like=None, /)
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
- dataset_type: str = "dataset"
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
- max_source_length: int = 128
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
===========changed ref 1===========
# module: dalle_mini.data
@dataclass
class Dataset:
+ def preprocess(self, tokenizer, decoder_start_token_id, normalize_text, max_length):
- def preprocess(self, tokenizer, decoder_start_token_id, normalize_text):
if self.streaming:
# we need to shuffle early in streaming mode
if hasattr(self, "train_dataset"):
self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
else:
# prepare rng for later shuffling
if self.seed_dataset is None:
self.seed_dataset = np.random.get_state()[1][0]
self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
# normalize text
if normalize_text:
text_normalizer = TextNormalizer()
partial_normalize_function = partial(
normalize_function,
text_column=self.text_column,
text_normalizer=text_normalizer,
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(partial_normalize_function)
if self.streaming
else getattr(self, ds).map(
partial_normalize_function,
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Normalizing datasets",
)
),
)
# preprocess
partial_preprocess_function = partial(
preprocess_function,
tokenizer=tokenizer,
text_column=self.text_column,
encoding_column=self.encoding_column,
+ max_length=max_length,
- max_source_length=self.max_source_length,
decoder_start_token_id=decoder_start_token_id,
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr</s>
===========changed ref 2===========
# module: dalle_mini.data
@dataclass
class Dataset:
+ def preprocess(self, tokenizer, decoder_start_token_id, normalize_text, max_length):
- def preprocess(self, tokenizer, decoder_start_token_id, normalize_text):
# offset: 1
<s>_start_token_id,
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(
partial_preprocess_function,
batched=True,
)
if self.streaming
else getattr(self, ds).map(
partial_preprocess_function,
batched=True,
remove_columns=getattr(ds, "column_names"),
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Preprocessing datasets",
)
),
)
===========changed ref 3===========
+ # module: dalle_mini.model.partitions
+
+
===========changed ref 4===========
+ # module: dalle_mini.model.modeling
+
+
===========changed ref 5===========
+ # module: dalle_mini.model.configuration
+
+
===========changed ref 6===========
- # module: dalle_mini.model
-
-
===========changed ref 7===========
+ # module: dalle_mini.model.modeling
+ class DalleBartModule(nn.Module):
+ def _get_decoder_module(self):
+ return self.decoder
+
===========changed ref 8===========
+ # module: dalle_mini.model.modeling
+ class DalleBartModule(nn.Module):
+ def _get_encoder_module(self):
+ return self.encoder
+
===========changed ref 9===========
+ # module: dalle_mini.model.modeling
+ class DalleBartForConditionalGenerationModule(nn.Module):
+ def _get_decoder_module(self):
+ return self.model.decoder
+
===========changed ref 10===========
+ # module: dalle_mini.model.modeling
+ class DalleBartForConditionalGenerationModule(nn.Module):
+ def _get_encoder_module(self):
+ return self.model.encoder
+
===========changed ref 11===========
+ # module: dalle_mini.model.modeling
+ logger = logging.get_logger(__name__)
+
===========changed ref 12===========
+ # module: dalle_mini.model.configuration
+ logger = logging.get_logger(__name__)
+
===========changed ref 13===========
- # module: dalle_mini.model
- class CustomFlaxBartForConditionalGeneration(FlaxBartForConditionalGeneration):
- module_class = CustomFlaxBartForConditionalGenerationModule
-
===========changed ref 14===========
+ # module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayer(nn.Module):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32
+
===========changed ref 15===========
+ # module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayer(nn.Module):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32
+
===========changed ref 16===========
+ # module: dalle_mini.model.modeling
+ class FlaxBartAttention(nn.Module):
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
+
===========changed ref 17===========
+ # module: dalle_mini.model.modeling
+ class DalleBartForConditionalGeneration(DalleBartPreTrainedModel):
+ module_class = DalleBartForConditionalGenerationModule
+ dtype: jnp.dtype = jnp.float32
+
===========changed ref 18===========
+ # module: dalle_mini.model.modeling
+ class DalleBartModule(nn.Module):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
===========changed ref 19===========
+ # module: dalle_mini.model.modeling
+ class DalleBartDecoder(nn.Module):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
===========changed ref 20===========
+ # module: dalle_mini.model.modeling
+ class DalleBartEncoder(nn.Module):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
===========changed ref 21===========
+ # module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(nn.Module):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
|
dalle_mini.model.modeling/FlaxBartAttention.setup
|
Modified
|
borisdayma~dalle-mini
|
972bc8d2443eab9ccdae4c1a33fbd0ccb1083d7f
|
refactor(model): inherit from HF Flax & simplify
|
<1>:<del> assert (
<2>:<add> if self.head_dim * self.num_heads != self.embed_dim:
<del> self.head_dim * self.num_heads == self.embed_dim
<3>:<add> raise ValueError(
<add> f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
<del> ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
<4>:<add> f" and `num_heads`: {self.num_heads})."
<add> )
<8>:<add> use_bias=self.bias,
<del> use_bias=False,
|
# module: dalle_mini.model.modeling
+ class FlaxBartAttention(FlaxBartAttention):
- class FlaxBartAttention(nn.Module):
def setup(self) -> None:
<0> self.head_dim = self.embed_dim // self.num_heads
<1> assert (
<2> self.head_dim * self.num_heads == self.embed_dim
<3> ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
<4>
<5> dense = partial(
<6> nn.Dense,
<7> self.embed_dim,
<8> use_bias=False,
<9> dtype=self.dtype,
<10> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<11> )
<12>
<13> self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
<14> self.out_proj = dense()
<15>
<16> self.dropout_layer = nn.Dropout(rate=self.dropout)
<17>
<18> if self.causal:
<19> self.causal_mask = make_causal_mask(
<20> jnp.ones((1, self.embed_dim), dtype="bool"), dtype="bool"
<21> )
<22>
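
The q/k/v projections above share one configuration via `functools.partial`; each call then instantiates a fresh `Dense`. A standalone sketch of the pattern with hypothetical sizes (512 features, stddev 0.02):

from functools import partial

import jax
import flax.linen as nn

dense = partial(
    nn.Dense,
    512,                                   # hypothetical embed_dim
    use_bias=False,
    kernel_init=jax.nn.initializers.normal(0.02),
)
# Each call builds an independent projection with identical hyperparameters.
q_proj, k_proj, v_proj = dense(), dense(), dense()
out_proj = dense()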
|
===========unchanged ref 0===========
at: dalle_mini.model.modeling.FlaxBartAttention.setup
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=self.bias,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.d_model = d_model
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayer
config: BartConfig
dtype: jnp.dtype = jnp.float32
setup(self) -> None
===========changed ref 0===========
# module: dalle_mini.model.modeling
+ class FlaxBartAttention(FlaxBartAttention):
- class FlaxBartAttention(nn.Module):
- config: DalleBartConfig
- embed_dim: int
- num_heads: int
- dropout: float = 0.0
- causal: bool = False
- bias: bool = True
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ """
+ Edits:
+ - causal mask considers embed_dim instead of max_position_embeddings
+ """
===========changed ref 1===========
# module: dalle_mini.model.modeling
- def shift_tokens_right(
- input_ids: np.array, pad_token_id: int, decoder_start_token_id: int
- ) -> np.ndarray:
- """
- Shift input ids one token to the right.
- """
- shifted_input_ids = np.zeros_like(input_ids)
- shifted_input_ids[:, 1:] = input_ids[:, :-1]
- shifted_input_ids[:, 0] = decoder_start_token_id
-
- shifted_input_ids = np.where(
- shifted_input_ids == -100, pad_token_id, shifted_input_ids
- )
- return shifted_input_ids
-
|