A MetaMask fork with Infura removed and default networks editable
ciphermask/patches/bip39+2.5.0.patch
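The file below is a patch-package patch: a file named <package>+<version>.patch under patches/ is applied to the installed node_modules copy of that package, conventionally via a "postinstall": "patch-package" script. This particular patch reworks bip39's mnemonic handling so the secret recovery phrase can be passed around as a Buffer instead of a string; unlike an immutable JavaScript string, a Buffer can be zero-filled as soon as it is no longer needed.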

diff --git a/node_modules/bip39/index.js b/node_modules/bip39/index.js
index aa0f29f..bee8008 100644
--- a/node_modules/bip39/index.js
+++ b/node_modules/bip39/index.js
@@ -48,7 +48,9 @@ function salt (password) {
 }
 
 function mnemonicToSeed (mnemonic, password) {
-  var mnemonicBuffer = Buffer.from(unorm.nfkd(mnemonic), 'utf8')
+  var mnemonicBuffer = typeof mnemonic === 'string'
+    ? Buffer.from(unorm.nfkd(mnemonic), 'utf8')
+    : mnemonic
   var saltBuffer = Buffer.from(salt(unorm.nfkd(password)), 'utf8')
 
   return pbkdf2(mnemonicBuffer, saltBuffer, 2048, 64, 'sha512')
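With this first hunk applied, mnemonicToSeed accepts either a string or a Buffer; the pbkdf2 call takes the Buffer directly. A minimal usage sketch — the wipe step is my assumption about the motivation, not something the patch itself does:

    var bip39 = require('bip39')

    // Keep the phrase as a Buffer so it can be scrubbed after use.
    var mnemonic = Buffer.from(
      'abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about',
      'utf8'
    )
    var seed = bip39.mnemonicToSeed(mnemonic)
    mnemonic.fill(0) // zero the phrase in place; an immutable string could not be scrubbed like this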
@@ -61,12 +63,28 @@ function mnemonicToSeedHex (mnemonic, password) {
 function mnemonicToEntropy (mnemonic, wordlist) {
   wordlist = wordlist || DEFAULT_WORDLIST
 
-  var words = unorm.nfkd(mnemonic).split(' ')
+  var mnemonicAsBuffer = typeof mnemonic === 'string'
+    ? Buffer.from(unorm.nfkd(mnemonic), 'utf8')
+    : mnemonic
+
+  var words = [];
+  var currentWord = [];
+  for (const byte of mnemonicAsBuffer.values()) {
+    // split at space or \u3000 (ideographic space, for Japanese wordlists)
+    if (byte === 0x20 || byte === 0x3000) {
+      words.push(Buffer.from(currentWord));
+      currentWord = [];
+    } else {
+      currentWord.push(byte);
+    }
+  }
+  words.push(Buffer.from(currentWord));
+
   if (words.length % 3 !== 0) throw new Error(INVALID_MNEMONIC)
 
   // convert word indices to 11 bit binary strings
   var bits = words.map(function (word) {
-    var index = wordlist.indexOf(word)
+    var index = wordlist.indexOf(word.toString('utf8'))
     if (index === -1) throw new Error(INVALID_MNEMONIC)
 
     return lpad(index.toString(2), '0', 11)
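The second hunk replaces String#split with a byte-level scan, so a Buffer mnemonic is decoded word by word without ever materializing a string. One observation: Buffer iteration yields integers in the 0-255 range, so the byte === 0x3000 comparison can never be true. That is harmless for string input, because NFKD folds U+3000 (ideographic space) to an ordinary 0x20 space before the Buffer is built, but a raw Buffer joined with U+3000 (UTF-8 bytes e3 80 80) would not be split. A sketch of a check that would also catch the multi-byte separator — my illustration, not part of the patch:

    // Hypothetical helper: how many separator bytes start at index i?
    // 1 for an ASCII space, 3 for U+3000 encoded in UTF-8, 0 otherwise.
    function separatorLengthAt (buf, i) {
      if (buf[i] === 0x20) return 1
      if (buf[i] === 0xe3 && buf[i + 1] === 0x80 && buf[i + 2] === 0x80) return 3
      return 0
    }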
@@ -104,12 +122,41 @@ function entropyToMnemonic (entropy, wordlist) {
 
   var bits = entropyBits + checksumBits
   var chunks = bits.match(/(.{1,11})/g)
-  var words = chunks.map(function (binary) {
+  var wordsAsBuffers = chunks.map(function (binary) {
     var index = binaryToByte(binary)
-    return wordlist[index]
+    return Buffer.from(wordlist[index], 'utf8')
   })
 
-  return wordlist === JAPANESE_WORDLIST ? words.join('\u3000') : words.join(' ')
+  var bufferSize = wordsAsBuffers.reduce(function (bufferSize, wordAsBuffer, i) {
+    var shouldAddSeparator = i < wordsAsBuffers.length - 1
+    return (
+      bufferSize +
+      wordAsBuffer.length +
+      (shouldAddSeparator ? 1 : 0)
+    )
+  }, 0)
+  var separator = wordlist === JAPANESE_WORDLIST ? '\u3000' : ' '
+  var result = wordsAsBuffers.reduce(function (result, wordAsBuffer, i) {
+    var shouldAddSeparator = i < wordsAsBuffers.length - 1
+    result.workingBuffer.set(wordAsBuffer, result.offset)
+    if (shouldAddSeparator) {
+      result.workingBuffer.write(
+        separator,
+        result.offset + wordAsBuffer.length,
+        separator.length,
+        'utf8'
+      )
+    }
+    return {
+      workingBuffer: result.workingBuffer,
+      offset: (
+        result.offset +
+        wordAsBuffer.length +
+        (shouldAddSeparator ? 1 : 0)
+      )
+    }
+  }, { workingBuffer: Buffer.alloc(bufferSize), offset: 0 })
+  return result.workingBuffer;
 }
 
 function generateMnemonic (strength, rng, wordlist) {
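The third hunk makes entropyToMnemonic assemble the phrase directly into one preallocated Buffer and return that Buffer instead of a joined string. Note that the size accounting reserves exactly one byte per separator, and separator.length is 1 even for '\u3000' (String#length counts UTF-16 code units, while Buffer#write's length argument caps the bytes written), so the three-byte ideographic space would be truncated; with the space-separated English wordlist the arithmetic is exact. A round-trip sketch, assuming the patch has been applied to the installed bip39:

    var bip39 = require('bip39')

    var phrase = bip39.entropyToMnemonic('00000000000000000000000000000000')
    console.log(Buffer.isBuffer(phrase))          // true: a Buffer, not a string
    console.log(bip39.mnemonicToEntropy(phrase))  // '00000000000000000000000000000000'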
@@ -124,6 +171,7 @@ function validateMnemonic (mnemonic, wordlist) {
   try {
     mnemonicToEntropy(mnemonic, wordlist)
   } catch (e) {
+    console.log('could not validate mnemonic', e)
    return false
  }