captainwong / douban-api-proxy · Commits

Commit 7982ed87, authored May 13, 2019 by subdiox
Downgrade bitjs to es5 branch

Parent: c0d136cc
Showing 9 changed files with 2176 additions and 2691 deletions (+2176 / -2691)
cps/static/js/archive/archive.js    +249  -236
cps/static/js/archive/rarvm.js      +473  -664
cps/static/js/archive/unrar.js      +458  -559
cps/static/js/archive/untar.js      +136  -152
cps/static/js/archive/unzip.js      +469  -497
cps/static/js/io/bitstream.js       +184  -237
cps/static/js/io/bytebuffer.js      +88   -83
cps/static/js/io/bytestream.js      +118  -262
cps/static/js/kthoom.js             +1    -1
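The nine files are the bitjs/kthoom scripts bundled under cps/static/js; the commit swaps their ES6 class/extends/const/let syntax for ES5 constructor functions, prototype methods and var, wired together with the bitjs.base/bitjs.inherits helpers added in archive.js below. A minimal sketch of the conversion pattern this diff applies throughout (Animal/Dog are illustrative names, not from the diff):

// ES6 shape being removed (shown as a comment for comparison):
//   class Dog extends Animal {
//     constructor(name) { super(name); }
//     speak() { return this.name + ' barks'; }
//   }

// ES5 shape the downgraded files use instead:
function Animal(name) {
  this.name = name;
}
Animal.prototype.speak = function() {
  return this.name + ' makes a sound';
};

function Dog(name) {
  // In the real code, bitjs.base(this, name) plays the role of super(name);
  // plain ES5 can simply call the parent constructor directly.
  Animal.call(this, name);
}
// bitjs.inherits(Dog, Animal) performs the equivalent of these two lines.
Dog.prototype = Object.create(Animal.prototype);
Dog.prototype.constructor = Dog;
Dog.prototype.speak = function() {
  return this.name + ' barks';
};

console.log(new Dog('Rex').speak()); // 'Rex barks'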
cps/static/js/archive/archive.js (view file @ 7982ed87)
@@ -11,21 +11,65 @@
var bitjs = bitjs || {};
bitjs.archive = bitjs.archive || {};

(function() {

Added (ES5 side): the Closure-style inheritance helpers the downgraded code relies on.

// ===========================================================================
// Stolen from Closure because it's the best way to do Java-like inheritance.
bitjs.base = function(me, opt_methodName, var_args) {
  var caller = arguments.callee.caller;
  if (caller.superClass_) {
    // This is a constructor. Call the superclass constructor.
    return caller.superClass_.constructor.apply(
        me, Array.prototype.slice.call(arguments, 1));
  }

  var args = Array.prototype.slice.call(arguments, 2);
  var foundCaller = false;
  for (var ctor = me.constructor; ctor;
       ctor = ctor.superClass_ && ctor.superClass_.constructor) {
    if (ctor.prototype[opt_methodName] === caller) {
      foundCaller = true;
    } else if (foundCaller) {
      return ctor.prototype[opt_methodName].apply(me, args);
    }
  }

  // If we did not find the caller in the prototype chain,
  // then one of two things happened:
  // 1) The caller is an instance method.
  // 2) This method was not called by the right caller.
  if (me[opt_methodName] === caller) {
    return me.constructor.prototype[opt_methodName].apply(me, args);
  } else {
    throw Error('goog.base called from a method of one name ' +
                'to a method of a different name');
  }
};

bitjs.inherits = function(childCtor, parentCtor) {
  /** @constructor */
  function tempCtor() {};
  tempCtor.prototype = parentCtor.prototype;
  childCtor.superClass_ = parentCtor.prototype;
  childCtor.prototype = new tempCtor();
  childCtor.prototype.constructor = childCtor;
};
// ===========================================================================

/**
 * An unarchive event.
 *
 * @param {string} type The event type.
 * @constructor
 */

Removed (ES6 side):

bitjs.archive.UnarchiveEvent = class {
  constructor(type) {
    /**
     * The event type.
     * @type {string}
     */
    this.type = type;
  }
}

Added (ES5 side):

bitjs.archive.UnarchiveEvent = function(type) {
  /**
   * The event type.
   * @type {string}
   */
  this.type = type;
};

Trailing context: the doc comment "The UnarchiveEvent types." introducing bitjs.archive.UnarchiveEvent.Type (the enum itself appears as context in the next hunk header).
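These helpers mimic Closure's goog.base/goog.inherits. A minimal sketch of how the downgraded code wires a subclass through them (Parent/Child are illustrative names; assumes archive.js has been loaded so bitjs.base and bitjs.inherits exist):

var Parent = function(msg) { this.msg = msg; };

var Child = function(msg) {
  // Equivalent to super(msg) in the removed ES6 classes.
  bitjs.base(this, msg);
};
bitjs.inherits(Child, Parent); // sets up the prototype chain and Child.superClass_

var c = new Child('hello');
console.log(c.msg);               // 'hello'
console.log(c instanceof Parent); // true

Note that bitjs.base depends on arguments.callee.caller, which is rejected in strict mode, so these files have to stay non-strict scripts rather than ES modules.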
@@ -41,102 +85,78 @@ bitjs.archive.UnarchiveEvent.Type = {
The six UnarchiveEvent subclasses are converted from ES6 classes to constructor functions registered with bitjs.inherits. The pattern, shown for UnarchiveInfoEvent and applied identically to UnarchiveErrorEvent, UnarchiveStartEvent, UnarchiveFinishEvent, UnarchiveProgressEvent and UnarchiveExtractEvent:

Removed (ES6 side):

/**
 * Useful for passing info up to the client (for debugging).
 */
bitjs.archive.UnarchiveInfoEvent = class extends bitjs.archive.UnarchiveEvent {
  /**
   * @param {string} msg The info message.
   */
  constructor(msg) {
    super(bitjs.archive.UnarchiveEvent.Type.INFO);

    /**
     * The information message.
     * @type {string}
     */
    this.msg = msg;
  }
}

Added (ES5 side):

/**
 * Useful for passing info up to the client (for debugging).
 *
 * @param {string} msg The info message.
 */
bitjs.archive.UnarchiveInfoEvent = function(msg) {
  bitjs.base(this, bitjs.archive.UnarchiveEvent.Type.INFO);

  /**
   * The information message.
   * @type {string}
   */
  this.msg = msg;
};
bitjs.inherits(bitjs.archive.UnarchiveInfoEvent, bitjs.archive.UnarchiveEvent);

The added ES5 progress event carries the same fields as the removed ES6 class except totalCompressedBytesRead, which only the ES6 constructor stored:

bitjs.archive.UnarchiveProgressEvent = function(currentFilename, currentFileNumber,
    currentBytesUnarchivedInFile, currentBytesUnarchived,
    totalUncompressedBytesInArchive, totalFilesInArchive) {
  bitjs.base(this, bitjs.archive.UnarchiveEvent.Type.PROGRESS);

  this.currentFilename = currentFilename;
  this.currentFileNumber = currentFileNumber;
  this.currentBytesUnarchivedInFile = currentBytesUnarchivedInFile;
  this.totalFilesInArchive = totalFilesInArchive;
  this.currentBytesUnarchived = currentBytesUnarchived;
  this.totalUncompressedBytesInArchive = totalUncompressedBytesInArchive;
};
bitjs.inherits(bitjs.archive.UnarchiveProgressEvent, bitjs.archive.UnarchiveEvent);

Trailing context: the doc comment beginning "All extracted files returned by an Unarchiver will implement ..." (continued in the next hunk).
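A quick sketch of constructing one of the ES5 events and checking that the prototype chain still behaves like the old class hierarchy (values are illustrative; assumes archive.js is loaded):

var e = new bitjs.archive.UnarchiveProgressEvent(
    'page01.jpg', // currentFilename
    1,            // currentFileNumber
    1024,         // currentBytesUnarchivedInFile
    1024,         // currentBytesUnarchived
    500000,       // totalUncompressedBytesInArchive
    42);          // totalFilesInArchive

console.log(e.type === bitjs.archive.UnarchiveEvent.Type.PROGRESS); // true
console.log(e instanceof bitjs.archive.UnarchiveEvent);             // true

The worker scripts (see unrar.js below) post events like this back to the page with postMessage().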
@@ -150,190 +170,185 @@ bitjs.archive.UnarchiveExtractEvent = class extends bitjs.archive.UnarchiveEvent
 */

Added (ES5 side): the extract event and the Unarchiver base type as a constructor function with prototype methods.

/**
 * Extract event.
 */
bitjs.archive.UnarchiveExtractEvent = function(unarchivedFile) {
  bitjs.base(this, bitjs.archive.UnarchiveEvent.Type.EXTRACT);

  /**
   * @type {UnarchivedFile}
   */
  this.unarchivedFile = unarchivedFile;
};
bitjs.inherits(bitjs.archive.UnarchiveExtractEvent, bitjs.archive.UnarchiveEvent);

/**
 * Base class for all Unarchivers.
 *
 * @param {ArrayBuffer} arrayBuffer The Array Buffer.
 * @param {string} opt_pathToBitJS Optional string for where the BitJS files are located.
 * @constructor
 */
bitjs.archive.Unarchiver = function(arrayBuffer, opt_pathToBitJS) {
  /**
   * The ArrayBuffer object.
   * @type {ArrayBuffer}
   * @protected
   */
  this.ab = arrayBuffer;

  /**
   * The path to the BitJS files.
   * @type {string}
   * @private
   */
  this.pathToBitJS_ = opt_pathToBitJS || '/';

  /**
   * A map from event type to an array of listeners.
   * @type {Map.<string, Array>}
   */
  this.listeners_ = {};
  for (var type in bitjs.archive.UnarchiveEvent.Type) {
    this.listeners_[bitjs.archive.UnarchiveEvent.Type[type]] = [];
  }
};

/**
 * Private web worker initialized during start().
 * @type {Worker}
 * @private
 */
bitjs.archive.Unarchiver.prototype.worker_ = null;

/**
 * This method must be overridden by the subclass to return the script filename.
 * @return {string} The script filename.
 * @protected.
 */
bitjs.archive.Unarchiver.prototype.getScriptFileName = function() {
  throw 'Subclasses of AbstractUnarchiver must overload getScriptFileName()';
};

/**
 * Adds an event listener for UnarchiveEvents.
 *
 * @param {string} Event type.
 * @param {function} An event handler function.
 */
bitjs.archive.Unarchiver.prototype.addEventListener = function(type, listener) {
  if (type in this.listeners_) {
    if (this.listeners_[type].indexOf(listener) == -1) {
      this.listeners_[type].push(listener);
    }
  }
};

/**
 * Removes an event listener.
 *
 * @param {string} Event type.
 * @param {EventListener|function} An event listener or handler function.
 */
bitjs.archive.Unarchiver.prototype.removeEventListener = function(type, listener) {
  if (type in this.listeners_) {
    var index = this.listeners_[type].indexOf(listener);
    if (index != -1) {
      this.listeners_[type].splice(index, 1);
    }
  }
};

/**
 * Receive an event and pass it to the listener functions.
 *
 * @param {bitjs.archive.UnarchiveEvent} e
 * @private
 */
bitjs.archive.Unarchiver.prototype.handleWorkerEvent_ = function(e) {
  if ((e instanceof bitjs.archive.UnarchiveEvent || e.type) &&
      this.listeners_[e.type] instanceof Array) {
    this.listeners_[e.type].forEach(function(listener) { listener(e) });
    if (e.type == bitjs.archive.UnarchiveEvent.Type.FINISH) {
      this.worker_.terminate();
    }
  } else {
    console.log(e);
  }
};

/**
 * Starts the unarchive in a separate Web Worker thread and returns immediately.
 */
bitjs.archive.Unarchiver.prototype.start = function() {
  var me = this;
  var scriptFileName = this.pathToBitJS_ + this.getScriptFileName();
  if (scriptFileName) {
    this.worker_ = new Worker(scriptFileName);

    this.worker_.onerror = function(e) {
      console.log('Worker error: message = ' + e.message);
      throw e;
    };

    this.worker_.onmessage = function(e) {
      if (typeof e.data == 'string') {
        // Just log any strings the workers pump our way.
        console.log(e.data);
      } else {
        // Assume that it is an UnarchiveEvent. Some browsers preserve the 'type'
        // so that instanceof UnarchiveEvent returns true, but others do not.
        me.handleWorkerEvent_(e.data);
      }
    };

    this.worker_.postMessage({file: this.ab});
  }
};

/**
 * Terminates the Web Worker for this Unarchiver and returns immediately.
 */
bitjs.archive.Unarchiver.prototype.stop = function() {
  if (this.worker_) {
    this.worker_.terminate();
  }
};

/**
 * Unzipper
 * @extends {bitjs.archive.Unarchiver}
 * @constructor
 */
bitjs.archive.Unzipper = function(arrayBuffer, opt_pathToBitJS) {
  bitjs.base(this, arrayBuffer, opt_pathToBitJS);
};
bitjs.inherits(bitjs.archive.Unzipper, bitjs.archive.Unarchiver);
bitjs.archive.Unzipper.prototype.getScriptFileName = function() { return 'unzip.js' };

/**
 * Unrarrer
 * @extends {bitjs.archive.Unarchiver}
 * @constructor
 */
bitjs.archive.Unrarrer = function(arrayBuffer, opt_pathToBitJS) {
  bitjs.base(this, arrayBuffer, opt_pathToBitJS);
};
bitjs.inherits(bitjs.archive.Unrarrer, bitjs.archive.Unarchiver);
bitjs.archive.Unrarrer.prototype.getScriptFileName = function() { return 'unrar.js' };

/**
 * Untarrer
 * @extends {bitjs.archive.Unarchiver}
 * @constructor
 */
bitjs.archive.Untarrer = function(arrayBuffer, opt_pathToBitJS) {
  bitjs.base(this, arrayBuffer, opt_pathToBitJS);
};
bitjs.inherits(bitjs.archive.Untarrer, bitjs.archive.Unarchiver);
bitjs.archive.Untarrer.prototype.getScriptFileName = function() { return 'untar.js' };

Removed (ES6 side): the class versions of UnarchiveExtractEvent, Unarchiver and the three subclasses. Besides the syntax, the removed Unarchiver differed in three ways visible in the diff: its start() posted {file: ab, logToConsole: false} and then set this.ab to null, it had an update(ab) method ("Adds more bytes to the unarchiver's Worker thread") that posted {bytes: ab}, and the subclasses' getScriptFileName() returned 'archive/unzip.js', 'archive/unrar.js' and 'archive/untar.js' instead of the bare filenames above.

Trailing context: the doc comment "Factory method that creates an unarchiver based on the byte signature found ..." (continued in the next hunk).
@@ -343,20 +358,18 @@ bitjs.archive.Untarrer = class extends bitjs.archive.Unarchiver {
 * @return {bitjs.archive.Unarchiver}
 */

Added (ES5 side); the removed ES6 version differs only in its const/let declarations and in writing the ZIP signature check as h[0] == 0x50 && h[1] == 0x4B:

bitjs.archive.GetUnarchiver = function(ab, opt_pathToBitJS) {
  if (ab.byteLength < 10) {
    return null;
  }

  var unarchiver = null;
  var pathToBitJS = opt_pathToBitJS || '';
  var h = new Uint8Array(ab, 0, 10);

  if (h[0] == 0x52 && h[1] == 0x61 && h[2] == 0x72 && h[3] == 0x21) { // Rar!
    unarchiver = new bitjs.archive.Unrarrer(ab, pathToBitJS);
  } else if (h[0] == 80 && h[1] == 75) { // PK (Zip)
    unarchiver = new bitjs.archive.Unzipper(ab, pathToBitJS);
  } else { // Try with tar
    unarchiver = new bitjs.archive.Untarrer(ab, pathToBitJS);
  }
  return unarchiver;
};

})();
(No newline at end of file.)
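A usage sketch tying the factory and the listeners together (the URL, worker path and log messages are illustrative, and archive.js plus the worker scripts are assumed to be served from the indicated directory):

fetch('comics/example.cbz')
  .then(function(response) { return response.arrayBuffer(); })
  .then(function(ab) {
    var unarchiver = bitjs.archive.GetUnarchiver(ab, 'js/archive/');
    if (!unarchiver) {
      throw new Error('buffer too small to sniff an archive signature');
    }

    unarchiver.addEventListener(bitjs.archive.UnarchiveEvent.Type.PROGRESS, function(e) {
      console.log('unpacked ' + e.currentBytesUnarchived + ' of ' +
                  e.totalUncompressedBytesInArchive + ' bytes');
    });
    unarchiver.addEventListener(bitjs.archive.UnarchiveEvent.Type.EXTRACT, function(e) {
      // e.unarchivedFile is the extracted file object produced by the worker.
      console.log('extracted a file', e.unarchivedFile);
    });
    unarchiver.addEventListener(bitjs.archive.UnarchiveEvent.Type.FINISH, function() {
      console.log('archive fully unpacked');
    });

    // Spawns a Web Worker running the matching script and posts { file: ab } to it.
    unarchiver.start();
  });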
cps/static/js/archive/rarvm.js (view file @ 7982ed87)
@@ -9,42 +9,12 @@
/**
 * CRC Implementation.
 */

Removed (ES6 side): the const CRC table and the signed/unsigned conversion helpers used by the audio filter that is also removed further down:

const CRCTab = new Array(256).fill(0);

// Helper functions between signed and unsigned integers.

/**
 * -1 becomes 0xffffffff
 */
function fromSigned32ToUnsigned32(val) {
  return (val < 0) ? (val += 0x100000000) : val;
}

/**
 * 0xffffffff becomes -1
 */
function fromUnsigned32ToSigned32(val) {
  return (val >= 0x80000000) ? (val -= 0x100000000) : val;
}

/**
 * -1 becomes 0xff
 */
function fromSigned8ToUnsigned8(val) {
  return (val < 0) ? (val += 0x100) : val;
}

/**
 * 0xff becomes -1
 */
function fromUnsigned8ToSigned8(val) {
  return (val >= 0x80) ? (val -= 0x100) : val;
}

Added (ES5 side): a var declaration for the table; InitCRC itself only changes let to var in its loops:

var CRCTab = new Array(256).fill(0);

function InitCRC() {
  for (var i = 0; i < 256; ++i) {
    var c = i;
    for (var j = 0; j < 8; ++j) {
      // Read http://stackoverflow.com/questions/6798111/bitwise-operations-on-32-bit-unsigned-ints
      // for the bitwise operator issue (JS interprets operands as 32-bit signed
      // integers and we need to deal with unsigned ones here).
@@ -90,8 +60,8 @@ function CRC(startCRC, arr) {
 #endif
 */

-  for (let i = 0; i < arr.length; ++i) {
-    const byte = ((startCRC ^ arr[i]) >>> 0) & 0xff;
+  for (var i = 0; i < arr.length; ++i) {
+    var byte = ((startCRC ^ arr[i]) >>> 0) & 0xff;
     startCRC = (CRCTab[byte] ^ (startCRC >>> 8)) >>> 0;
   }
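The loop above is the standard table-driven CRC-32 update. A self-contained sketch of the same computation (the table-filling line inside InitCRC is not visible in this hunk, so the standard CRC-32 polynomial 0xEDB88320 is assumed here):

var CRCTab = new Array(256).fill(0);

function InitCRC() {
  for (var i = 0; i < 256; ++i) {
    var c = i;
    for (var j = 0; j < 8; ++j) {
      // Assumed table step: shift right, xor the polynomial when the low bit is set.
      c = (c & 1) ? (((c >>> 1) ^ 0xEDB88320) >>> 0) : (c >>> 1);
    }
    CRCTab[i] = c >>> 0;
  }
}

function CRC(startCRC, arr) {
  for (var i = 0; i < arr.length; ++i) {
    var byte = ((startCRC ^ arr[i]) >>> 0) & 0xff;
    startCRC = (CRCTab[byte] ^ (startCRC >>> 8)) >>> 0;
  }
  return startCRC;
}

InitCRC();
// Finalized the same way isStandardFilter does it: init 0xffffffff, xor, force unsigned.
var bytes = [0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39]; // '123456789'
var crc = (CRC(0xffffffff, bytes) ^ 0xffffffff) >>> 0;
console.log(crc.toString(16)); // 'cbf43926', the standard CRC-32 check value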
@@ -104,17 +74,17 @@ function CRC(startCRC, arr) {
 /**
  * RarVM Implementation.
  */
-const VM_MEMSIZE = 0x40000;
-const VM_MEMMASK = (VM_MEMSIZE - 1);
-const VM_GLOBALMEMADDR = 0x3C000;
-const VM_GLOBALMEMSIZE = 0x2000;
-const VM_FIXEDGLOBALSIZE = 64;
-const MAXWINSIZE = 0x400000;
-const MAXWINMASK = (MAXWINSIZE - 1);
+var VM_MEMSIZE = 0x40000;
+var VM_MEMMASK = (VM_MEMSIZE - 1);
+var VM_GLOBALMEMADDR = 0x3C000;
+var VM_GLOBALMEMSIZE = 0x2000;
+var VM_FIXEDGLOBALSIZE = 64;
+var MAXWINSIZE = 0x400000;
+var MAXWINMASK = (MAXWINSIZE - 1);

 /**
  */
-const VM_Commands = {
+var VM_Commands = {
   VM_MOV: 0,
   VM_CMP: 1,
   VM_ADD: 2,
@@ -171,7 +141,7 @@ const VM_Commands = {
 /**
  */
-const VM_StandardFilters = {
+var VM_StandardFilters = {
   VMSF_NONE: 0,
   VMSF_E8: 1,
   VMSF_E8E9: 2,
@@ -184,7 +154,7 @@ const VM_StandardFilters = {
 /**
  */
-const VM_Flags = {
+var VM_Flags = {
   VM_FC: 1,
   VM_FZ: 2,
   VM_FS: 0x80000000,
@@ -192,7 +162,7 @@ const VM_Flags = {
 /**
  */
-const VM_OpType = {
+var VM_OpType = {
   VM_OPREG: 0,
   VM_OPINT: 1,
   VM_OPREGMEM: 2,
@@ -207,7 +177,7 @@ const VM_OpType = {
  * @return {string} The key/enum value as a string.
  */
 function findKeyForValue(obj, val) {
-  for (let key in obj) {
+  for (var key in obj) {
     if (obj[key] === val) {
       return key;
     }
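findKeyForValue does a reverse lookup over the enum objects above; a self-contained check (the helper is copied locally so the snippet runs on its own):

function findKeyForValue(obj, val) {
  for (var key in obj) {
    if (obj[key] === val) {
      return key;
    }
  }
}

var VM_Flags = { VM_FC: 1, VM_FZ: 2, VM_FS: 0x80000000 };
console.log(findKeyForValue(VM_Flags, 2));          // 'VM_FZ'
console.log(findKeyForValue(VM_Flags, 0x80000000)); // 'VM_FS'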
@@ -216,7 +186,7 @@ function findKeyForValue(obj, val) {
 }

 function getDebugString(obj, val) {
-  let s = 'Unknown.';
+  var s = 'Unknown.';
   if (obj === VM_Commands) {
     s = 'VM_Commands.';
   } else if (obj === VM_StandardFilters) {
@@ -231,144 +201,144 @@ function getDebugString(obj, val) {
 }

The four @struct helpers lose their ES6 class syntax: each becomes a plain constructor function, with toString() moved onto the prototype, and the VMCF_* flag constants switch from const to var. Representative added ES5 code for VM_PreparedOperand:

/**
 * @struct
 * @constructor
 */
var VM_PreparedOperand = function() {
  /** @type {VM_OpType} */
  this.Type;
  /** @type {number} */
  this.Data = 0;
  /** @type {number} */
  this.Base = 0;
  // TODO: In C++ this is a uint*
  /** @type {Array<number>} */
  this.Addr = null;
};

/** @return {string} */
VM_PreparedOperand.prototype.toString = function() {
  if (this.Type === null) {
    return 'Error: Type was null in VM_PreparedOperand';
  }
  return '{ ' + 'Type: ' + getDebugString(VM_OpType, this.Type) +
      ', Data: ' + this.Data + ', Base: ' + this.Base + ' }';
};

The same conversion is applied to:

- VM_PreparedCommand (fields OpCode, ByteMode = false, and Op1/Op2 as new VM_PreparedOperand(); its toString(indent) dumps OpCode, ByteMode, Op1 and Op2);
- VM_PreparedProgram (fields Cmd = [], AltCmd = null, GlobalData and StaticData as empty Uint8Arrays, InitR = new Uint32Array(7), FilteredData = null; its toString() dumps the Cmd array);
- UnpackFilter (fields BlockStart, BlockLength, ExecCount, NextWindow = false, ParentFilter = null, Prg = new VM_PreparedProgram()).

The flag constants and the per-opcode command-flag table become:

var VMCF_OP0 = 0;
var VMCF_OP1 = 1;
var VMCF_OP2 = 2;
var VMCF_OPMASK = 3;
var VMCF_BYTEMODE = 4;
var VMCF_JUMP = 8;
var VMCF_PROC = 16;
var VMCF_USEFLAGS = 32;
var VMCF_CHFLAGS = 64;

var VM_CmdFlags = [
  /* VM_MOV */ VMCF_OP2 | VMCF_BYTEMODE,
  /* VM_CMP */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_ADD */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS,
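prepare(), further down in this file, indexes this table by opcode to decide how many operands to decode and whether a byte-mode bit follows the opcode. A small illustration (assumes the VMCF_* flags, VM_CmdFlags and VM_Commands from rarvm.js are in scope):

var flags = VM_CmdFlags[VM_Commands.VM_CMP];    // VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS
var opNum = flags & VMCF_OPMASK;                // 2: VM_CMP takes two operands
var hasByteModeBit = !!(flags & VMCF_BYTEMODE); // true: a byte-mode bit is encoded
console.log(opNum, hasByteModeBit);             // 2 true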
@@ -413,29 +383,27 @@ const VM_CmdFlags = [
StandardFilterSignature goes from an ES6 class to a constructor function, and StdList from const to var:

/**
 * @param {number} length
 * @param {number} crc
 * @param {VM_StandardFilters} type
 * @struct
 * @constructor
 */
var StandardFilterSignature = function(length, crc, type) {
  /** @type {number} */
  this.Length = length;
  /** @type {number} */
  this.CRC = crc;
  /** @type {VM_StandardFilters} */
  this.Type = type;
};

/**
 * @type {Array<StandardFilterSignature>}
 */
var StdList = [
  new StandardFilterSignature(53, 0xad576887, VM_StandardFilters.VMSF_E8),
  new StandardFilterSignature(57, 0x3cd7e57e, VM_StandardFilters.VMSF_E8E9),
  new StandardFilterSignature(120, 0x3769893f, VM_StandardFilters.VMSF_ITANIUM),
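isStandardFilter in the next hunk recognizes a built-in filter program by matching its CRC-32 and length against this table. Condensed, the lookup amounts to the sketch below (assumes CRC, StdList and VM_StandardFilters from this file are in scope):

function lookupStandardFilter(code) {
  // Same finalization as isStandardFilter: init 0xffffffff, xor, force unsigned.
  var codeCRC = (CRC(0xffffffff, code, code.length) ^ 0xffffffff) >>> 0;
  for (var i = 0; i < StdList.length; ++i) {
    if (StdList[i].CRC == codeCRC && StdList[i].Length == code.length) {
      return StdList[i].Type;
    }
  }
  return VM_StandardFilters.VMSF_NONE;
}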
@@ -448,562 +416,403 @@ const StdList = [
/**
 * @constructor
 */

The RarVM ES6 class becomes a constructor function plus prototype methods: init, isStandardFilter, decodeArg, execute, executeCode, executeStandardFilter, prepare, setLowEndianValue and setMemory, with readData kept as a static on RarVM itself. Inside the method bodies the conversion is again const/let to var. The one substantive difference visible in this hunk is in executeStandardFilter: the removed ES6 version implemented the VMSF_RGB, VMSF_AUDIO and VMSF_DELTA standard filters (using the signed/unsigned helpers removed at the top of the file), while the added ES5 version implements only VMSF_DELTA and reports 'RarVM Standard Filter not supported' for the rest. The shorter added methods, reconstructed:

var RarVM = function() {
  /** @private {Uint8Array} */
  this.mem_ = null;

  /** @private {Uint32Array<number>} */
  this.R_ = new Uint32Array(8);

  /** @private {number} */
  this.flags_ = 0;
};

/**
 * Initializes the memory of the VM.
 */
RarVM.prototype.init = function() {
  if (!this.mem_) {
    this.mem_ = new Uint8Array(VM_MEMSIZE);
  }
};

/**
 * @param {Uint8Array} arr The byte array to set a value in.
 * @param {number} value The unsigned 32-bit value to set.
 * @param {number} offset Offset into arr to start setting the value, defaults to 0.
 */
RarVM.prototype.setLowEndianValue = function(arr, value, offset) {
  var i = offset || 0;
  arr[i] = value & 0xff;
  arr[i + 1] = (value >>> 8) & 0xff;
  arr[i + 2] = (value >>> 16) & 0xff;
  arr[i + 3] = (value >>> 24) & 0xff;
};

/**
 * Sets a number of bytes of the VM memory at the given position from a
 * source buffer of bytes.
 * @param {number} pos The position in the VM memory to start writing to.
 * @param {Uint8Array} buffer The source buffer of bytes.
 * @param {number} dataSize The number of bytes to set.
 */
RarVM.prototype.setMemory = function(pos, buffer, dataSize) {
  if (pos < VM_MEMSIZE) {
    var numBytes = Math.min(dataSize, VM_MEMSIZE - pos);
    for (var i = 0; i < numBytes; ++i) {
      this.mem_[pos + i] = buffer[i];
    }
  }
};

/**
 * Static function that reads in the next set of bits for the VM
 * (might return 4, 8, 16 or 32 bits).
 * @param {bitjs.io.BitStream} bstream A RTL bit stream.
 * @return {number} The value of the bits read.
 */
RarVM.readData = function(bstream) {
  // Read in the first 2 bits.
  var flags = bstream.readBits(2);
  switch (flags) { // Data&0xc000
    // Return the next 4 bits.
    case 0:
      return bstream.readBits(4); // (Data>>10)&0xf

    case 1: // 0x4000
      // 0x3c00 => 0011 1100 0000 0000
      if (bstream.peekBits(4) == 0) { // (Data&0x3c00)==0
        // Skip the 4 zero bits.
        bstream.readBits(4);
        // Read in the next 8 and pad with 1s to 32 bits.
        return (0xffffff00 | bstream.readBits(8)) >>> 0; // ((Data>>2)&0xff)
      }
      // Else, read in the next 8.
      return bstream.readBits(8);

    // Read in the next 16.
    case 2: // 0x8000
      var val = bstream.getBits();
      bstream.readBits(16);
      return val;

    // case 3
    default:
      return (bstream.readBits(16) << 16) | bstream.readBits(16);
  }
};

The longer methods follow the same prototype pattern with let/const replaced by var: decodeArg() reads an operand (register, integer, or register/base memory reference) from the RTL bit stream; execute() copies prg.InitR into the registers, loads GlobalData and StaticData into VM global memory, runs the prepared code through executeCode() (replacing an invalid program with a single VM_RET), and exposes the filtered output window as prg.FilteredData; executeCode() loops over VM_PreparedCommands, handling VM_RET and VM_STANDARD and logging unsupported opcodes; and prepare() verifies the bytecode's single-byte XOR checksum against code[0], recognizes built-in filters via isStandardFilter(), decodes each command's opcode, byte-mode flag and operands (using VM_CmdFlags), fixes up jump distances, appends a final VM_RET command, and points any unset operand Addr fields at their Data fields.

// ============================================================================================== //
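readData() above implements a small variable-length integer encoding: a 2-bit tag selects a 4-, 8-, 16- or 32-bit payload. A standalone sketch of the same decoder (MiniBitStream is an illustrative stand-in for bitjs.io.BitStream, which lives in io/bitstream.js; the case-2 path is simplified to a plain 16-bit read):

function MiniBitStream(bits) { // bits: array of 0/1, most significant bit first
  this.bits = bits;
  this.pos = 0;
}
MiniBitStream.prototype.peekBits = function(n) {
  var val = 0;
  for (var i = 0; i < n; ++i) {
    val = (val << 1) | this.bits[this.pos + i];
  }
  return val >>> 0;
};
MiniBitStream.prototype.readBits = function(n) {
  var val = this.peekBits(n);
  this.pos += n;
  return val;
};

function readData(bstream) {
  var flags = bstream.readBits(2);
  switch (flags) {
    case 0:
      return bstream.readBits(4);                         // 4-bit value
    case 1:
      if (bstream.peekBits(4) == 0) {
        bstream.readBits(4);
        return (0xffffff00 | bstream.readBits(8)) >>> 0;  // 8 bits, padded with 1s
      }
      return bstream.readBits(8);                         // plain 8-bit value
    case 2:
      return bstream.readBits(16);                        // 16-bit value
    default:
      return (bstream.readBits(16) << 16) | bstream.readBits(16); // 32-bit value
  }
}

// Tag '00' followed by '0101' decodes to 5:
console.log(readData(new MiniBitStream([0, 0, 0, 1, 0, 1]))); // 5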
cps/static/js/archive/unrar.js
View file @ 7982ed87
@@ -7,309 +7,264 @@
 * Copyright(c) 2011 antimatter15
 */

// TODO: Rewrite the RarLocalHeader parsing to use a ByteStream instead
// of a BitStream so that it throws properly when not enough bytes are
// present.

// This file expects to be invoked as a Worker (see onmessage below).
importScripts('../io/bitstream.js');
importScripts('../io/bytestream.js');
importScripts('../io/bytebuffer.js');
importScripts('archive.js');
importScripts('rarvm.js');

const UnarchiveState = {
    NOT_STARTED: 0,
    UNARCHIVING: 1,
    WAITING: 2,
    FINISHED: 3,
};

// State - consider putting these into a class.
let unarchiveState = UnarchiveState.NOT_STARTED;
let bytestream = null;
let allLocalFiles = null;
let logToConsole = false;

// Progress variables.
let currentFilename = '';
let currentFileNumber = 0;
let currentBytesUnarchivedInFile = 0;
let currentBytesUnarchived = 0;
let totalUncompressedBytesInArchive = 0;
let totalFilesInArchive = 0;

var currentFilename = "";
var currentFileNumber = 0;
var currentBytesUnarchivedInFile = 0;
var currentBytesUnarchived = 0;
var totalUncompressedBytesInArchive = 0;
var totalFilesInArchive = 0;
// Helper functions.
const info = function(str) {
var info = function(str) {
    postMessage(new bitjs.archive.UnarchiveInfoEvent(str));
};
const err = function(str) {
var err = function(str) {
    postMessage(new bitjs.archive.UnarchiveErrorEvent(str));
};
const postProgress = function() {
var postProgress = function() {
    postMessage(new bitjs.archive.UnarchiveProgressEvent(
        currentFilename,
        currentFileNumber,
        currentBytesUnarchivedInFile,
        currentBytesUnarchived,
        totalUncompressedBytesInArchive,
        totalFilesInArchive,
        parseInt(bytestream.getNumBytesRead(), 10),
    ));
        totalFilesInArchive));
};

// shows a byte value as its hex representation
const nibble = '0123456789ABCDEF';
const byteValueToHexString = function(num) {
var nibble = "0123456789ABCDEF";
var byteValueToHexString = function(num) {
    return nibble[num >> 4] + nibble[num & 0xF];
};
const twoByteValueToHexString = function(num) {
var twoByteValueToHexString = function(num) {
    return nibble[(num >> 12) & 0xF] + nibble[(num >> 8) & 0xF] + nibble[(num >> 4) & 0xF] + nibble[num & 0xF];
};
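// Editor's sketch (not part of the original diff): quick sanity check of the helpers above.
// byteValueToHexString(0x7a)      -> "7A"   (the NEWSUB_HEAD type byte below)
// twoByteValueToHexString(0x1A21) -> "1A21" (the flags value checked for the RAR signature)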
// Volume Types
const MARK_HEAD = 0x72;
const MAIN_HEAD = 0x73;
const FILE_HEAD = 0x74;
const COMM_HEAD = 0x75;
const AV_HEAD = 0x76;
const SUB_HEAD = 0x77;
const PROTECT_HEAD = 0x78;
const SIGN_HEAD = 0x79;
const NEWSUB_HEAD = 0x7a;
const ENDARC_HEAD = 0x7b;
var MARK_HEAD = 0x72,
    MAIN_HEAD = 0x73,
    FILE_HEAD = 0x74,
    COMM_HEAD = 0x75,
    AV_HEAD = 0x76,
    SUB_HEAD = 0x77,
    PROTECT_HEAD = 0x78,
    SIGN_HEAD = 0x79,
    NEWSUB_HEAD = 0x7a,
    ENDARC_HEAD = 0x7b;
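// Editor's note (not from the commit): every block that RarVolumeHeader parses below
// starts with the same 7-byte prologue, which is why these constants exist -
//   bytes 1-2  CRC of the header
//   byte  3    head type (one of the constants above, e.g. 0x74 = FILE_HEAD)
//   bytes 4-5  flags
//   bytes 6-7  total header size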
// ============================================================================================== //

/**
 * @param {bitjs.io.BitStream} bstream
 * @constructor
 */
class RarVolumeHeader {
    /**
     * @param {bitjs.io.ByteStream} bstream
     */
    constructor(bstream) {
        let headBytesRead = 0;

        // byte 1,2
        this.crc = bstream.readNumber(2);
        // byte 3
        this.headType = bstream.readNumber(1);

        // Get flags
        // bytes 4,5
        this.flags = {};
        this.flags.value = bstream.readNumber(2);
        const flagsValue = this.flags.value;
        switch (this.headType) {
            case MAIN_HEAD:
                this.flags.MHD_VOLUME = !!(flagsValue & 0x01);
                this.flags.MHD_COMMENT = !!(flagsValue & 0x02);
                this.flags.MHD_LOCK = !!(flagsValue & 0x04);
                this.flags.MHD_SOLID = !!(flagsValue & 0x08);
                this.flags.MHD_PACK_COMMENT = !!(flagsValue & 0x10);
                this.flags.MHD_NEWNUMBERING = this.flags.MHD_PACK_COMMENT;
                this.flags.MHD_AV = !!(flagsValue & 0x20);
                this.flags.MHD_PROTECT = !!(flagsValue & 0x40);
                this.flags.MHD_PASSWORD = !!(flagsValue & 0x80);
                this.flags.MHD_FIRSTVOLUME = !!(flagsValue & 0x100);
                this.flags.MHD_ENCRYPTVER = !!(flagsValue & 0x200);
                //bstream.readBits(6); // unused
                break;
            case FILE_HEAD:
                this.flags.LHD_SPLIT_BEFORE = !!(flagsValue & 0x01);
                this.flags.LHD_SPLIT_AFTER = !!(flagsValue & 0x02);
                this.flags.LHD_PASSWORD = !!(flagsValue & 0x04);
                this.flags.LHD_COMMENT = !!(flagsValue & 0x08);
                this.flags.LHD_SOLID = !!(flagsValue & 0x10);
                // 3 bits unused
                this.flags.LHD_LARGE = !!(flagsValue & 0x100);
                this.flags.LHD_UNICODE = !!(flagsValue & 0x200);
                this.flags.LHD_SALT = !!(flagsValue & 0x400);
                this.flags.LHD_VERSION = !!(flagsValue & 0x800);
                this.flags.LHD_EXTTIME = !!(flagsValue & 0x1000);
                this.flags.LHD_EXTFLAGS = !!(flagsValue & 0x2000);
                // 2 bits unused
                //info(' LHD_SPLIT_BEFORE = ' + this.flags.LHD_SPLIT_BEFORE);
                break;
            default:
                break;
        }
        // byte 6,7
        this.headSize = bstream.readNumber(2);
        headBytesRead += 7;

        switch (this.headType) {
            case MAIN_HEAD:
                this.highPosAv = bstream.readNumber(2);
                this.posAv = bstream.readNumber(4);
                headBytesRead += 6;
                if (this.flags.MHD_ENCRYPTVER) {
                    this.encryptVer = bstream.readNumber(1);
                    headBytesRead += 1;
                }
                //info('Found MAIN_HEAD with highPosAv=' + this.highPosAv + ', posAv=' + this.posAv);
                break;
            case FILE_HEAD:
                this.packSize = bstream.readNumber(4);
                this.unpackedSize = bstream.readNumber(4);
                this.hostOS = bstream.readNumber(1);
                this.fileCRC = bstream.readNumber(4);
                this.fileTime = bstream.readNumber(4);
                this.unpVer = bstream.readNumber(1);
                this.method = bstream.readNumber(1);
                this.nameSize = bstream.readNumber(2);
                this.fileAttr = bstream.readNumber(4);
                headBytesRead += 25;

                if (this.flags.LHD_LARGE) {
                    //info('Warning: Reading in LHD_LARGE 64-bit size values');
                    this.HighPackSize = bstream.readNumber(4);
                    this.HighUnpSize = bstream.readNumber(4);
                    headBytesRead += 8;
                } else {
                    this.HighPackSize = 0;
                    this.HighUnpSize = 0;
                    if (this.unpackedSize == 0xffffffff) {
                        this.HighUnpSize = 0x7fffffff
                        this.unpackedSize = 0xffffffff;
                    }
                }
                this.fullPackSize = 0;
                this.fullUnpackSize = 0;
                this.fullPackSize |= this.HighPackSize;
                this.fullPackSize <<= 32;
                this.fullPackSize |= this.packSize;

                // read in filename
                // TODO: Use readString?
                this.filename = bstream.readBytes(this.nameSize);
                headBytesRead += this.nameSize;
                let _s = '';
                for (let _i = 0; _i < this.filename.length; _i++) {
                    _s += String.fromCharCode(this.filename[_i]);
var RarVolumeHeader = function(bstream) {
    var headPos = bstream.bytePtr;
    // byte 1,2
    info("Rar Volume Header @" + bstream.bytePtr);

    this.crc = bstream.readBits(16);
    info(" crc=" + this.crc);

                this.filename = _s;

    // byte 3
    this.headType = bstream.readBits(8);
    info(" headType=" + this.headType);

                if (this.flags.LHD_SALT) {
                    //info('Warning: Reading in 64-bit salt value');
                    this.salt = bstream.readBytes(8); // 8 bytes
                    headBytesRead += 8;

    // Get flags
    // bytes 4,5
    this.flags = {};
    this.flags.value = bstream.peekBits(16);
    info(" flags=" + twoByteValueToHexString(this.flags.value));
    switch (this.headType) {
    case MAIN_HEAD:
        this.flags.MHD_VOLUME = !!bstream.readBits(1);
        this.flags.MHD_COMMENT = !!bstream.readBits(1);
        this.flags.MHD_LOCK = !!bstream.readBits(1);
        this.flags.MHD_SOLID = !!bstream.readBits(1);
        this.flags.MHD_PACK_COMMENT = !!bstream.readBits(1);
        this.flags.MHD_NEWNUMBERING = this.flags.MHD_PACK_COMMENT;
        this.flags.MHD_AV = !!bstream.readBits(1);
        this.flags.MHD_PROTECT = !!bstream.readBits(1);
        this.flags.MHD_PASSWORD = !!bstream.readBits(1);
        this.flags.MHD_FIRSTVOLUME = !!bstream.readBits(1);
        this.flags.MHD_ENCRYPTVER = !!bstream.readBits(1);
        bstream.readBits(6); // unused
        break;
    case FILE_HEAD:
        this.flags.LHD_SPLIT_BEFORE = !!bstream.readBits(1); // 0x0001
        this.flags.LHD_SPLIT_AFTER = !!bstream.readBits(1); // 0x0002
        this.flags.LHD_PASSWORD = !!bstream.readBits(1); // 0x0004
        this.flags.LHD_COMMENT = !!bstream.readBits(1); // 0x0008
        this.flags.LHD_SOLID = !!bstream.readBits(1); // 0x0010
        bstream.readBits(3); // unused
        this.flags.LHD_LARGE = !!bstream.readBits(1); // 0x0100
        this.flags.LHD_UNICODE = !!bstream.readBits(1); // 0x0200
        this.flags.LHD_SALT = !!bstream.readBits(1); // 0x0400
        this.flags.LHD_VERSION = !!bstream.readBits(1); // 0x0800
        this.flags.LHD_EXTTIME = !!bstream.readBits(1); // 0x1000
        this.flags.LHD_EXTFLAGS = !!bstream.readBits(1); // 0x2000
        bstream.readBits(2); // unused
        info(" LHD_SPLIT_BEFORE = " + this.flags.LHD_SPLIT_BEFORE);
        break;
    default:
        bstream.readBits(16);
    }

    // byte 6,7
    this.headSize = bstream.readBits(16);
    info(" headSize=" + this.headSize);
    switch (this.headType) {
    case MAIN_HEAD:
        this.highPosAv = bstream.readBits(16);
        this.posAv = bstream.readBits(32);
        if (this.flags.MHD_ENCRYPTVER) {
            this.encryptVer = bstream.readBits(8);
        }
        info("Found MAIN_HEAD with highPosAv=" + this.highPosAv + ", posAv=" + this.posAv);
        break;
    case FILE_HEAD:
        this.packSize = bstream.readBits(32);
        this.unpackedSize = bstream.readBits(32);
        this.hostOS = bstream.readBits(8);
        this.fileCRC = bstream.readBits(32);
        this.fileTime = bstream.readBits(32);
        this.unpVer = bstream.readBits(8);
        this.method = bstream.readBits(8);
        this.nameSize = bstream.readBits(16);
        this.fileAttr = bstream.readBits(32);
        if (this.flags.LHD_LARGE) {
            info("Warning: Reading in LHD_LARGE 64-bit size values");
            this.HighPackSize = bstream.readBits(32);
            this.HighUnpSize = bstream.readBits(32);
        } else {
            this.HighPackSize = 0;
            this.HighUnpSize = 0;
            if (this.unpackedSize == 0xffffffff) {
                this.HighUnpSize = 0x7fffffff
                this.unpackedSize = 0xffffffff;
            }
        }
        this.fullPackSize = 0;
        this.fullUnpackSize = 0;
        this.fullPackSize |= this.HighPackSize;
        this.fullPackSize <<= 32;
        this.fullPackSize |= this.packSize;

        // read in filename
        this.filename = bstream.readBytes(this.nameSize);
        for (var _i = 0, _s = ''; _i < this.filename.length; _i++) {
            _s += String.fromCharCode(this.filename[_i]);
        }
        this.filename = _s;

        if (this.flags.LHD_SALT) {
            info("Warning: Reading in 64-bit salt value");
            this.salt = bstream.readBits(64); // 8 bytes
        }
        if (this.flags.LHD_EXTTIME) {
            // 16-bit flags
            var extTimeFlags = bstream.readBits(16);

            // this is adapted straight out of arcread.cpp, Archive::ReadHeader()
            for (var I = 0; I < 4; ++I) {
                var rmode = extTimeFlags >> ((3 - I) * 4);
                if ((rmode & 8) == 0) {
                    continue;
                }

                if (this.flags.LHD_EXTTIME) {
                    // 16-bit flags
                    const extTimeFlags = bstream.readNumber(2);
                    headBytesRead += 2;

                    // this is adapted straight out of arcread.cpp, Archive::ReadHeader()
                    for (let I = 0; I < 4; ++I) {
                        const rmode = extTimeFlags >> ((3 - I) * 4);
                        if ((rmode & 8) == 0) {
                            continue;
                        }
                        if (I != 0) {
                            bstream.readBytes(2);
                            headBytesRead += 2;
                        }
                        const count = (rmode & 3);
                        for (let J = 0; J < count; ++J) {
                            bstream.readNumber(1);
                            headBytesRead += 1;
                        }

                if (I != 0)
                    bstream.readBits(16);
                var count = (rmode & 3);
                for (var J = 0; J < count; ++J) {
                    bstream.readBits(8);
                }
            }

                if (this.flags.LHD_COMMENT) {
                    //info('Found a LHD_COMMENT');
                }

                if (headBytesRead < this.headSize) {
                    bstream.readBytes(this.headSize - headBytesRead);
                }
                break;

            case ENDARC_HEAD:
                break;

            default:
                if (logToConsole) {
                    info('Found a header of type 0x' + byteValueToHexString(this.headType));
                }
                // skip the rest of the header bytes (for now)
                bstream.readBytes(this.headSize - 7);
                break;
        }
    }
}
    dump() {
        info(' crc=' + this.crc);
        info(' headType=' + this.headType);
        info(' flags=' + twoByteValueToHexString(this.flags.value));
        info(' headSize=' + this.headSize);
        if (this.headType == FILE_HEAD) {
            info('Found FILE_HEAD with packSize=' + this.packSize +
                ', unpackedSize= ' + this.unpackedSize +
                ', hostOS=' + this.hostOS +
                ', unpVer=' + this.unpVer +
                ', method=' + this.method +
                ', filename=' + this.filename);

        if (this.flags.LHD_COMMENT) {
            info("Found a LHD_COMMENT");
        }

        while (headPos + this.headSize > bstream.bytePtr) {
            bstream.readBits(1);
        }

        info("Found FILE_HEAD with packSize=" + this.packSize +
            ", unpackedSize= " + this.unpackedSize +
            ", hostOS=" + this.hostOS +
            ", unpVer=" + this.unpVer +
            ", method=" + this.method +
            ", filename=" + this.filename);
        break;

    default:
        info("Found a header of type 0x" + byteValueToHexString(this.headType));
        // skip the rest of the header bytes (for now)
        bstream.readBytes(this.headSize - 7);
        break;
    }
    }
}
};
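// Editor's sketch (not part of the original diff): how the header parser above is driven.
// RarLocalFile further down does exactly this; bstream is assumed to already be positioned
// at the start of a block.
// var header = new RarVolumeHeader(bstream);
// if (header.headType == FILE_HEAD) {
//     info("file: " + header.filename + ", packSize=" + header.packSize);
// }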
const BLOCK_LZ = 0;
const BLOCK_PPM = 1;
var BLOCK_LZ = 0,
    BLOCK_PPM = 1;

const rLDecode = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224];
const rLBits = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5];
const rDBitLengthCounts = [4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 14, 0, 12];
const rSDDecode = [0, 4, 8, 16, 32, 64, 128, 192];
const rSDBits = [2, 2, 3, 4, 5, 6, 6, 6];
var rLDecode = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224],
    rLBits = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5],
    rDBitLengthCounts = [4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 14, 0, 12],
    rSDDecode = [0, 4, 8, 16, 32, 64, 128, 192],
    rSDBits = [2, 2, 3, 4, 5, 6, 6, 6];

const rDDecode = [0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32,
var rDDecode = [0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32,
    48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144,
    8192, 12288, 16384, 24576, 32768, 49152, 65536, 98304, 131072, 196608, 262144,
    327680, 393216, 458752, 524288, 589824, 655360, 720896, 786432, 851968, 917504, 983040];

const rDBits = [0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5,
var rDBits = [0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5,
    5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16];

const rLOW_DIST_REP_COUNT = 16;
var rLOW_DIST_REP_COUNT = 16;

const rNC = 299;
const rDC = 60;
const rLDC = 17;
const rRC = 28;
const rBC = 20;
const rHUFF_TABLE_SIZE = (rNC + rDC + rRC + rLDC);
var rNC = 299,
    rDC = 60,
    rLDC = 17,
    rRC = 28,
    rBC = 20,
    rHUFF_TABLE_SIZE = (rNC + rDC + rRC + rLDC);

const UnpOldTable = new Array(rHUFF_TABLE_SIZE);
var UnpBlockType = BLOCK_LZ;
var UnpOldTable = new Array(rHUFF_TABLE_SIZE);

const BD = { //bitdecode
var BD = { //bitdecode
    DecodeLen: new Array(16),
    DecodePos: new Array(16),
    DecodeNum: new Array(rBC)
};
const LD = { //litdecode
var LD = { //litdecode
    DecodeLen: new Array(16),
    DecodePos: new Array(16),
    DecodeNum: new Array(rNC)
};
const DD = { //distdecode
var DD = { //distdecode
    DecodeLen: new Array(16),
    DecodePos: new Array(16),
    DecodeNum: new Array(rDC)
};
const LDD = { //low dist decode
var LDD = { //low dist decode
    DecodeLen: new Array(16),
    DecodePos: new Array(16),
    DecodeNum: new Array(rLDC)
};
const RD = { //rep decode
var RD = { //rep decode
    DecodeLen: new Array(16),
    DecodePos: new Array(16),
    DecodeNum: new Array(rRC)
rRC
)
...
...
@@ -318,19 +273,19 @@ const RD = { //rep decode
/**
* @type {Array<bitjs.io.ByteBuffer>}
*/
const
rOldBuffers
=
[];
var
rOldBuffers
=
[];
/**
* The current buffer we are unpacking to.
* @type {bitjs.io.ByteBuffer}
*/
let
rBuffer
;
var
rBuffer
;
/**
* The buffer of the final bytes after filtering (only used in Unpack29).
* @type {bitjs.io.ByteBuffer}
*/
let
wBuffer
;
var
wBuffer
;
/**
...
...
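// Editor's note (not part of the original diff, and an interpretation rather than a
// statement of the library's contract): the BD/LD/DD/LDD/RD structures above are the
// canonical-Huffman decode tables that RarMakeDecodeTables() fills and RarDecodeNumber()
// reads further down. Roughly:
//   DecodeLen[k] - largest left-justified 16-bit code value used by codes of length <= k
//   DecodePos[k] - index into DecodeNum of the first symbol whose code has length k
//   DecodeNum[n] - symbol numbers ordered by increasing code length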
@@ -349,38 +304,39 @@ let wBuffer;
* @param {bitjs.io.BitStream} bstream
*/
function
RarReadTables
(
bstream
)
{
const
BitLength
=
new
Array
(
rBC
);
const
Table
=
new
Array
(
rHUFF_TABLE_SIZE
);
var
BitLength
=
new
Array
(
rBC
);
var
Table
=
new
Array
(
rHUFF_TABLE_SIZE
);
// before we start anything we need to get byte-aligned
bstream
.
readBits
(
(
8
-
bstream
.
bitPtr
)
&
0x7
);
if
(
bstream
.
readBits
(
1
))
{
info
(
'Error! PPM not implemented yet'
);
info
(
"Error! PPM not implemented yet"
);
return
;
}
if
(
!
bstream
.
readBits
(
1
))
{
//discard old table
for
(
let
i
=
UnpOldTable
.
length
;
i
--
;)
{
for
(
var
i
=
UnpOldTable
.
length
;
i
--
;)
{
UnpOldTable
[
i
]
=
0
;
}
}
// read in bit lengths
for
(
let
I
=
0
;
I
<
rBC
;
++
I
)
{
const
Length
=
bstream
.
readBits
(
4
);
for
(
var
I
=
0
;
I
<
rBC
;
++
I
)
{
var
Length
=
bstream
.
readBits
(
4
);
if
(
Length
==
15
)
{
let
ZeroCount
=
bstream
.
readBits
(
4
);
var
ZeroCount
=
bstream
.
readBits
(
4
);
if
(
ZeroCount
==
0
)
{
BitLength
[
I
]
=
15
;
}
else
{
}
else
{
ZeroCount
+=
2
;
while
(
ZeroCount
--
>
0
&&
I
<
rBC
)
{
while
(
ZeroCount
--
>
0
&&
I
<
rBC
)
BitLength
[
I
++
]
=
0
;
}
--
I
;
}
}
else
{
}
else
{
BitLength
[
I
]
=
Length
;
}
}
...
...
@@ -389,21 +345,22 @@ function RarReadTables(bstream) {
RarMakeDecodeTables
(
BitLength
,
0
,
BD
,
rBC
);
const
TableSize
=
rHUFF_TABLE_SIZE
;
for
(
let
i
=
0
;
i
<
TableSize
;)
{
const
num
=
RarDecodeNumber
(
bstream
,
BD
);
var
TableSize
=
rHUFF_TABLE_SIZE
;
//console.log(DecodeLen, DecodePos, DecodeNum);
for
(
var
i
=
0
;
i
<
TableSize
;)
{
var
num
=
RarDecodeNumber
(
bstream
,
BD
);
if
(
num
<
16
)
{
Table
[
i
]
=
(
num
+
UnpOldTable
[
i
])
&
0xf
;
i
++
;
}
else
if
(
num
<
18
)
{
let
N
=
(
num
==
16
)
?
(
bstream
.
readBits
(
3
)
+
3
)
:
(
bstream
.
readBits
(
7
)
+
11
);
var
N
=
(
num
==
16
)
?
(
bstream
.
readBits
(
3
)
+
3
)
:
(
bstream
.
readBits
(
7
)
+
11
);
while
(
N
--
>
0
&&
i
<
TableSize
)
{
Table
[
i
]
=
Table
[
i
-
1
];
i
++
;
}
}
else
{
let
N
=
(
num
==
18
)
?
(
bstream
.
readBits
(
3
)
+
3
)
:
(
bstream
.
readBits
(
7
)
+
11
);
var
N
=
(
num
==
18
)
?
(
bstream
.
readBits
(
3
)
+
3
)
:
(
bstream
.
readBits
(
7
)
+
11
);
while
(
N
--
>
0
&&
i
<
TableSize
)
{
Table
[
i
++
]
=
0
;
...
...
@@ -416,7 +373,7 @@ function RarReadTables(bstream) {
RarMakeDecodeTables
(
Table
,
rNC
+
rDC
,
LDD
,
rLDC
);
RarMakeDecodeTables
(
Table
,
rNC
+
rDC
+
rLDC
,
RD
,
rRC
);
for
(
let
i
=
UnpOldTable
.
length
;
i
--
;)
{
for
(
var
i
=
UnpOldTable
.
length
;
i
--
;)
{
UnpOldTable
[
i
]
=
Table
[
i
];
}
return
true
;
...
...
@@ -424,12 +381,10 @@ function RarReadTables(bstream) {
function
RarDecodeNumber
(
bstream
,
dec
)
{
const
DecodeLen
=
dec
.
DecodeLen
;
const
DecodePos
=
dec
.
DecodePos
;
const
DecodeNum
=
dec
.
DecodeNum
;
const
bitField
=
bstream
.
getBits
()
&
0xfffe
;
var
DecodeLen
=
dec
.
DecodeLen
,
DecodePos
=
dec
.
DecodePos
,
DecodeNum
=
dec
.
DecodeNum
;
var
bitField
=
bstream
.
getBits
()
&
0xfffe
;
//some sort of rolled out binary search
const
bits
=
((
bitField
<
DecodeLen
[
8
])?
var
bits
=
((
bitField
<
DecodeLen
[
8
])?
((
bitField
<
DecodeLen
[
4
])?
((
bitField
<
DecodeLen
[
2
])?
((
bitField
<
DecodeLen
[
1
])?
1
:
2
)
...
...
@@ -445,25 +400,25 @@ function RarDecodeNumber(bstream, dec) {
((
bitField
<
DecodeLen
[
13
])?
13
:
14
)
:
15
));
bstream
.
readBits
(
bits
);
const
N
=
DecodePos
[
bits
]
+
((
bitField
-
DecodeLen
[
bits
-
1
])
>>>
(
16
-
bits
));
var
N
=
DecodePos
[
bits
]
+
((
bitField
-
DecodeLen
[
bits
-
1
])
>>>
(
16
-
bits
));
return
DecodeNum
[
N
];
}
function
RarMakeDecodeTables
(
BitLength
,
offset
,
dec
,
size
)
{
const
DecodeLen
=
dec
.
DecodeLen
;
const
DecodePos
=
dec
.
DecodePos
;
const
DecodeNum
=
dec
.
DecodeNum
;
const
LenCount
=
[
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
];
const
TmpPos
=
[
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
];
let
N
=
0
;
let
M
=
0
;
for
(
let
i
=
DecodeNum
.
length
;
i
--
;)
{
var
DecodeLen
=
dec
.
DecodeLen
;
var
DecodePos
=
dec
.
DecodePos
;
var
DecodeNum
=
dec
.
DecodeNum
;
var
LenCount
=
[
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
];
var
TmpPos
=
[
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
];
var
N
=
0
;
var
M
=
0
;
for
(
var
i
=
DecodeNum
.
length
;
i
--
;)
{
DecodeNum
[
i
]
=
0
;
}
for
(
let
i
=
0
;
i
<
size
;
i
++
)
{
for
(
var
i
=
0
;
i
<
size
;
i
++
)
{
LenCount
[
BitLength
[
i
+
offset
]
&
0xF
]
++
;
}
LenCount
[
0
]
=
0
;
...
...
@@ -471,7 +426,7 @@ function RarMakeDecodeTables(BitLength, offset, dec, size) {
DecodePos
[
0
]
=
0
;
DecodeLen
[
0
]
=
0
;
for
(
let
I
=
1
;
I
<
16
;
++
I
)
{
for
(
var
I
=
1
;
I
<
16
;
++
I
)
{
N
=
2
*
(
N
+
LenCount
[
I
]);
M
=
(
N
<<
(
15
-
I
));
if
(
M
>
0xFFFF
)
{
...
...
@@ -481,7 +436,7 @@ function RarMakeDecodeTables(BitLength, offset, dec, size) {
DecodePos
[
I
]
=
DecodePos
[
I
-
1
]
+
LenCount
[
I
-
1
];
TmpPos
[
I
]
=
DecodePos
[
I
];
}
for
(
let
I
=
0
;
I
<
size
;
++
I
)
{
for
(
I
=
0
;
I
<
size
;
++
I
)
{
if
(
BitLength
[
I
+
offset
]
!=
0
)
{
DecodeNum
[
TmpPos
[
BitLength
[
offset
+
I
]
&
0xF
]
++
]
=
I
;
}
...
...
@@ -495,7 +450,7 @@ function RarMakeDecodeTables(BitLength, offset, dec, size) {
* @param {boolean} Solid
*/
function
Unpack15
(
bstream
,
Solid
)
{
info
(
'ERROR! RAR 1.5 compression not supported'
);
info
(
"ERROR! RAR 1.5 compression not supported"
);
}
/**
...
...
@@ -504,25 +459,23 @@ function Unpack15(bstream, Solid) {
* @param {boolean} Solid
*/
function
Unpack20
(
bstream
,
Solid
)
{
const
destUnpSize
=
rBuffer
.
data
.
length
;
let
oldDistPtr
=
0
;
if
(
!
Solid
)
{
RarReadTables20
(
bstream
);
}
var
destUnpSize
=
rBuffer
.
data
.
length
;
var
oldDistPtr
=
0
;
RarReadTables20
(
bstream
);
while
(
destUnpSize
>
rBuffer
.
ptr
)
{
let
num
=
RarDecodeNumber
(
bstream
,
LD
);
var
num
=
RarDecodeNumber
(
bstream
,
LD
);
if
(
num
<
256
)
{
rBuffer
.
insertByte
(
num
);
continue
;
}
if
(
num
>
269
)
{
let
Length
=
rLDecode
[
num
-=
270
]
+
3
;
var
Length
=
rLDecode
[
num
-=
270
]
+
3
;
if
((
Bits
=
rLBits
[
num
])
>
0
)
{
Length
+=
bstream
.
readBits
(
Bits
);
}
let
DistNumber
=
RarDecodeNumber
(
bstream
,
DD
);
let
Distance
=
rDDecode
[
DistNumber
]
+
1
;
var
DistNumber
=
RarDecodeNumber
(
bstream
,
DD
);
var
Distance
=
rDDecode
[
DistNumber
]
+
1
;
if
((
Bits
=
rDBits
[
DistNumber
])
>
0
)
{
Distance
+=
bstream
.
readBits
(
Bits
);
}
...
...
@@ -548,9 +501,9 @@ function Unpack20(bstream, Solid) {
continue
;
}
if
(
num
<
261
)
{
const
Distance
=
rOldDist
[(
oldDistPtr
-
(
num
-
256
))
&
3
];
const
LengthNumber
=
RarDecodeNumber
(
bstream
,
RD
);
let
Length
=
rLDecode
[
LengthNumber
]
+
2
;
var
Distance
=
rOldDist
[(
oldDistPtr
-
(
num
-
256
))
&
3
];
var
LengthNumber
=
RarDecodeNumber
(
bstream
,
RD
);
var
Length
=
rLDecode
[
LengthNumber
]
+
2
;
if
((
Bits
=
rLBits
[
LengthNumber
])
>
0
)
{
Length
+=
bstream
.
readBits
(
Bits
);
}
...
...
@@ -569,7 +522,7 @@ function Unpack20(bstream, Solid) {
continue
;
}
if
(
num
<
270
)
{
let
Distance
=
rSDDecode
[
num
-=
261
]
+
1
;
var
Distance
=
rSDDecode
[
num
-=
261
]
+
1
;
if
((
Bits
=
rSDBits
[
num
])
>
0
)
{
Distance
+=
bstream
.
readBits
(
Bits
);
}
...
...
@@ -584,41 +537,38 @@ function Unpack20(bstream, Solid) {
}
function
RarUpdateProgress
()
{
const
change
=
rBuffer
.
ptr
-
currentBytesUnarchivedInFile
;
var
change
=
rBuffer
.
ptr
-
currentBytesUnarchivedInFile
;
currentBytesUnarchivedInFile
=
rBuffer
.
ptr
;
currentBytesUnarchived
+=
change
;
postProgress
();
}
const
rNC20
=
298
;
const
rDC20
=
48
;
const
rRC20
=
28
;
const
rBC20
=
19
;
const
rMC20
=
257
;
var
rNC20
=
298
,
rDC20
=
48
,
rRC20
=
28
,
rBC20
=
19
,
rMC20
=
257
;
const
UnpOldTable20
=
new
Array
(
rMC20
*
4
);
var
UnpOldTable20
=
new
Array
(
rMC20
*
4
);
// TODO: This function should return a boolean value, see unpack20.cpp.
function
RarReadTables20
(
bstream
)
{
const
BitLength
=
new
Array
(
rBC20
);
const
Table
=
new
Array
(
rMC20
*
4
);
let
TableSize
;
let
N
;
let
I
;
const
AudioBlock
=
bstream
.
readBits
(
1
);
var
BitLength
=
new
Array
(
rBC20
);
var
Table
=
new
Array
(
rMC20
*
4
);
var
TableSize
,
N
,
I
;
var
AudioBlock
=
bstream
.
readBits
(
1
);
if
(
!
bstream
.
readBits
(
1
))
{
for
(
let
i
=
UnpOldTable20
.
length
;
i
--
;)
{
for
(
var
i
=
UnpOldTable20
.
length
;
i
--
;)
{
UnpOldTable20
[
i
]
=
0
;
}
}
TableSize
=
rNC20
+
rDC20
+
rRC20
;
for
(
I
=
0
;
I
<
rBC20
;
I
++
)
{
for
(
var
I
=
0
;
I
<
rBC20
;
I
++
)
{
BitLength
[
I
]
=
bstream
.
readBits
(
4
);
}
RarMakeDecodeTables
(
BitLength
,
0
,
BD
,
rBC20
);
I
=
0
;
while
(
I
<
TableSize
)
{
const
num
=
RarDecodeNumber
(
bstream
,
BD
);
var
num
=
RarDecodeNumber
(
bstream
,
BD
);
if
(
num
<
16
)
{
Table
[
I
]
=
num
+
UnpOldTable20
[
I
]
&
0xf
;
I
++
;
...
...
@@ -642,43 +592,43 @@ function RarReadTables20(bstream) {
RarMakeDecodeTables
(
Table
,
0
,
LD
,
rNC20
);
RarMakeDecodeTables
(
Table
,
rNC20
,
DD
,
rDC20
);
RarMakeDecodeTables
(
Table
,
rNC20
+
rDC20
,
RD
,
rRC20
);
for
(
let
i
=
UnpOldTable20
.
length
;
i
--
;)
{
for
(
var
i
=
UnpOldTable20
.
length
;
i
--
;)
{
UnpOldTable20
[
i
]
=
Table
[
i
];
}
}
let
lowDistRepCount
=
0
;
let
prevLowDist
=
0
;
var
lowDistRepCount
=
0
;
var
prevLowDist
=
0
;
let
rOldDist
=
[
0
,
0
,
0
,
0
];
let
lastDist
;
let
lastLength
;
var
rOldDist
=
[
0
,
0
,
0
,
0
];
var
lastDist
;
var
lastLength
;
// ============================================================================================== //
// Unpack code specific to RarVM
const
VM
=
new
RarVM
();
var
VM
=
new
RarVM
();
/**
* Filters code, one entry per filter.
* @type {Array<UnpackFilter>}
*/
let
Filters
=
[];
var
Filters
=
[];
/**
* Filters stack, several entrances of same filter are possible.
* @type {Array<UnpackFilter>}
*/
let
PrgStack
=
[];
var
PrgStack
=
[];
/**
* Lengths of preceding blocks, one length per filter. Used to reduce
* size required to write block length if lengths are repeating.
* @type {Array<number>}
*/
let
OldFilterLengths
=
[];
var
OldFilterLengths
=
[];
let
LastFilter
=
0
;
var
LastFilter
=
0
;
function
InitFilters
()
{
OldFilterLengths
=
[];
...
...
@@ -694,9 +644,9 @@ function InitFilters() {
*/
function
RarAddVMCode
(
firstByte
,
vmCode
)
{
VM
.
init
();
const
bstream
=
new
bitjs
.
io
.
BitStream
(
vmCode
.
buffer
,
true
/* rtl */
);
var
bstream
=
new
bitjs
.
io
.
BitStream
(
vmCode
.
buffer
,
true
/* rtl */
);
let
filtPos
;
var
filtPos
;
if
(
firstByte
&
0x80
)
{
filtPos
=
RarVM
.
readData
(
bstream
);
if
(
filtPos
==
0
)
{
...
...
@@ -713,11 +663,11 @@ function RarAddVMCode(firstByte, vmCode) {
}
LastFilter
=
filtPos
;
const
newFilter
=
(
filtPos
==
Filters
.
length
);
var
newFilter
=
(
filtPos
==
Filters
.
length
);
// new filter for PrgStack
const
stackFilter
=
new
UnpackFilter
();
let
filter
=
null
;
var
stackFilter
=
new
UnpackFilter
();
var
filter
=
null
;
// new filter code, never used before since VM reset
if
(
newFilter
)
{
// too many different filters, corrupt archive
...
...
@@ -736,8 +686,8 @@ function RarAddVMCode(firstByte, vmCode) {
filter
.
ExecCount
++
;
}
let
emptyCount
=
0
;
for
(
let
i
=
0
;
i
<
PrgStack
.
length
;
++
i
)
{
var
emptyCount
=
0
;
for
(
var
i
=
0
;
i
<
PrgStack
.
length
;
++
i
)
{
PrgStack
[
i
-
emptyCount
]
=
PrgStack
[
i
];
if
(
PrgStack
[
i
]
==
null
)
{
...
...
@@ -753,11 +703,11 @@ function RarAddVMCode(firstByte, vmCode) {
emptyCount
=
1
;
}
const
stackPos
=
PrgStack
.
length
-
emptyCount
;
var
stackPos
=
PrgStack
.
length
-
emptyCount
;
PrgStack
[
stackPos
]
=
stackFilter
;
stackFilter
.
ExecCount
=
filter
.
ExecCount
;
let
blockStart
=
RarVM
.
readData
(
bstream
);
var
blockStart
=
RarVM
.
readData
(
bstream
);
if
(
firstByte
&
0x40
)
{
blockStart
+=
258
;
}
...
...
@@ -775,7 +725,7 @@ function RarAddVMCode(firstByte, vmCode) {
OldFilterLengths
[
filtPos
]
=
stackFilter
.
BlockLength
;
for
(
let
i
=
0
;
i
<
7
;
++
i
)
{
for
(
var
i
=
0
;
i
<
7
;
++
i
)
{
stackFilter
.
Prg
.
InitR
[
i
]
=
0
;
}
stackFilter
.
Prg
.
InitR
[
3
]
=
VM_GLOBALMEMADDR
;
...
...
@@ -784,8 +734,8 @@ function RarAddVMCode(firstByte, vmCode) {
// set registers to optional parameters if any
if
(
firstByte
&
0x10
)
{
const
initMask
=
bstream
.
readBits
(
7
);
for
(
let
i
=
0
;
i
<
7
;
++
i
)
{
var
initMask
=
bstream
.
readBits
(
7
);
for
(
var
i
=
0
;
i
<
7
;
++
i
)
{
if
(
initMask
&
(
1
<<
i
))
{
stackFilter
.
Prg
.
InitR
[
i
]
=
RarVM
.
readData
(
bstream
);
}
...
...
@@ -793,12 +743,12 @@ function RarAddVMCode(firstByte, vmCode) {
}
if
(
newFilter
)
{
const
vmCodeSize
=
RarVM
.
readData
(
bstream
);
var
vmCodeSize
=
RarVM
.
readData
(
bstream
);
if
(
vmCodeSize
>=
0x10000
||
vmCodeSize
==
0
)
{
return
false
;
}
const
vmCode
=
new
Uint8Array
(
vmCodeSize
);
for
(
let
i
=
0
;
i
<
vmCodeSize
;
++
i
)
{
var
vmCode
=
new
Uint8Array
(
vmCodeSize
);
for
(
var
i
=
0
;
i
<
vmCodeSize
;
++
i
)
{
//if (Inp.Overflow(3))
// return(false);
vmCode
[
i
]
=
bstream
.
readBits
(
8
);
...
...
@@ -808,10 +758,10 @@ function RarAddVMCode(firstByte, vmCode) {
stackFilter
.
Prg
.
Cmd
=
filter
.
Prg
.
Cmd
;
stackFilter
.
Prg
.
AltCmd
=
filter
.
Prg
.
Cmd
;
const
staticDataSize
=
filter
.
Prg
.
StaticData
.
length
;
var
staticDataSize
=
filter
.
Prg
.
StaticData
.
length
;
if
(
staticDataSize
>
0
&&
staticDataSize
<
VM_GLOBALMEMSIZE
)
{
// read statically defined data contained in DB commands
for
(
let
i
=
0
;
i
<
staticDataSize
;
++
i
)
{
for
(
var
i
=
0
;
i
<
staticDataSize
;
++
i
)
{
stackFilter
.
Prg
.
StaticData
[
i
]
=
filter
.
Prg
.
StaticData
[
i
];
}
}
...
...
@@ -820,15 +770,15 @@ function RarAddVMCode(firstByte, vmCode) {
stackFilter
.
Prg
.
GlobalData
=
new
Uint8Array
(
VM_FIXEDGLOBALSIZE
);
}
const
globalData
=
stackFilter
.
Prg
.
GlobalData
;
for
(
let
i
=
0
;
i
<
7
;
++
i
)
{
var
globalData
=
stackFilter
.
Prg
.
GlobalData
;
for
(
var
i
=
0
;
i
<
7
;
++
i
)
{
VM
.
setLowEndianValue
(
globalData
,
stackFilter
.
Prg
.
InitR
[
i
],
i
*
4
);
}
VM
.
setLowEndianValue
(
globalData
,
stackFilter
.
BlockLength
,
0x1c
);
VM
.
setLowEndianValue
(
globalData
,
0
,
0x20
);
VM
.
setLowEndianValue
(
globalData
,
stackFilter
.
ExecCount
,
0x2c
);
for
(
let
i
=
0
;
i
<
16
;
++
i
)
{
for
(
var
i
=
0
;
i
<
16
;
++
i
)
{
globalData
[
0x30
+
i
]
=
0
;
}
...
...
@@ -836,23 +786,23 @@ function RarAddVMCode(firstByte, vmCode) {
if
(
firstByte
&
8
)
{
//if (Inp.Overflow(3))
// return(false);
const
dataSize
=
RarVM
.
readData
(
bstream
);
var
dataSize
=
RarVM
.
readData
(
bstream
);
if
(
dataSize
>
(
VM_GLOBALMEMSIZE
-
VM_FIXEDGLOBALSIZE
))
{
return
false
;
return
(
false
)
;
}
const
curSize
=
stackFilter
.
Prg
.
GlobalData
.
length
;
var
curSize
=
stackFilter
.
Prg
.
GlobalData
.
length
;
if
(
curSize
<
dataSize
+
VM_FIXEDGLOBALSIZE
)
{
// Resize global data and update the stackFilter and local variable.
const
numBytesToAdd
=
dataSize
+
VM_FIXEDGLOBALSIZE
-
curSize
;
const
newGlobalData
=
new
Uint8Array
(
globalData
.
length
+
numBytesToAdd
);
var
numBytesToAdd
=
dataSize
+
VM_FIXEDGLOBALSIZE
-
curSize
;
var
newGlobalData
=
new
Uint8Array
(
globalData
.
length
+
numBytesToAdd
);
newGlobalData
.
set
(
globalData
);
stackFilter
.
Prg
.
GlobalData
=
newGlobalData
;
globalData
=
newGlobalData
;
}
//byte *GlobalData=&StackFilter->Prg.GlobalData[VM_FIXEDGLOBALSIZE];
for
(
let
i
=
0
;
i
<
dataSize
;
++
i
)
{
for
(
var
i
=
0
;
i
<
dataSize
;
++
i
)
{
//if (Inp.Overflow(3))
// return(false);
globalData
[
VM_FIXEDGLOBALSIZE
+
i
]
=
bstream
.
readBits
(
8
);
...
...
@@ -867,8 +817,8 @@ function RarAddVMCode(firstByte, vmCode) {
* @param {!bitjs.io.BitStream} bstream
*/
function
RarReadVMCode
(
bstream
)
{
const
firstByte
=
bstream
.
readBits
(
8
);
let
length
=
(
firstByte
&
7
)
+
1
;
var
firstByte
=
bstream
.
readBits
(
8
);
var
length
=
(
firstByte
&
7
)
+
1
;
if
(
length
==
7
)
{
length
=
bstream
.
readBits
(
8
)
+
7
;
}
else
if
(
length
==
8
)
{
...
...
@@ -876,8 +826,8 @@ function RarReadVMCode(bstream) {
}
// Read all bytes of VM code into an array.
const
vmCode
=
new
Uint8Array
(
length
);
for
(
let
i
=
0
;
i
<
length
;
i
++
)
{
var
vmCode
=
new
Uint8Array
(
length
);
for
(
var
i
=
0
;
i
<
length
;
i
++
)
{
// Do something here with checking readbuf.
vmCode
[
i
]
=
bstream
.
readBits
(
8
);
}
...
...
@@ -892,21 +842,21 @@ function RarReadVMCode(bstream) {
function
Unpack29
(
bstream
,
Solid
)
{
// lazy initialize rDDecode and rDBits
const
DDecode
=
new
Array
(
rDC
);
const
DBits
=
new
Array
(
rDC
);
var
DDecode
=
new
Array
(
rDC
);
var
DBits
=
new
Array
(
rDC
);
let
Dist
=
0
;
let
BitLength
=
0
;
let
Slot
=
0
;
var
Dist
=
0
;
var
BitLength
=
0
;
var
Slot
=
0
;
for
(
let
I
=
0
;
I
<
rDBitLengthCounts
.
length
;
I
++
,
BitLength
++
)
{
for
(
let
J
=
0
;
J
<
rDBitLengthCounts
[
I
];
J
++
,
Slot
++
,
Dist
+=
(
1
<<
BitLength
))
{
for
(
var
I
=
0
;
I
<
rDBitLengthCounts
.
length
;
I
++
,
BitLength
++
)
{
for
(
var
J
=
0
;
J
<
rDBitLengthCounts
[
I
];
J
++
,
Slot
++
,
Dist
+=
(
1
<<
BitLength
))
{
DDecode
[
Slot
]
=
Dist
;
DBits
[
Slot
]
=
BitLength
;
}
}
let
Bits
;
var
Bits
;
//tablesRead = false;
rOldDist
=
[
0
,
0
,
0
,
0
]
...
...
@@ -914,7 +864,7 @@ function Unpack29(bstream, Solid) {
lastDist
=
0
;
lastLength
=
0
;
for
(
let
i
=
UnpOldTable
.
length
;
i
--
;)
{
for
(
var
i
=
UnpOldTable
.
length
;
i
--
;)
{
UnpOldTable
[
i
]
=
0
;
}
...
...
@@ -922,19 +872,19 @@ function Unpack29(bstream, Solid) {
RarReadTables
(
bstream
);
while
(
true
)
{
let
num
=
RarDecodeNumber
(
bstream
,
LD
);
var
num
=
RarDecodeNumber
(
bstream
,
LD
);
if
(
num
<
256
)
{
rBuffer
.
insertByte
(
num
);
continue
;
}
if
(
num
>=
271
)
{
let
Length
=
rLDecode
[
num
-=
271
]
+
3
;
var
Length
=
rLDecode
[
num
-=
271
]
+
3
;
if
((
Bits
=
rLBits
[
num
])
>
0
)
{
Length
+=
bstream
.
readBits
(
Bits
);
}
const
DistNumber
=
RarDecodeNumber
(
bstream
,
DD
);
let
Distance
=
DDecode
[
DistNumber
]
+
1
;
var
DistNumber
=
RarDecodeNumber
(
bstream
,
DD
);
var
Distance
=
DDecode
[
DistNumber
]
+
1
;
if
((
Bits
=
DBits
[
DistNumber
])
>
0
)
{
if
(
DistNumber
>
9
)
{
if
(
Bits
>
4
)
{
...
...
@@ -946,7 +896,7 @@ function Unpack29(bstream, Solid) {
lowDistRepCount
--
;
Distance
+=
prevLowDist
;
}
else
{
const
LowDist
=
RarDecodeNumber
(
bstream
,
LDD
);
var
LowDist
=
RarDecodeNumber
(
bstream
,
LDD
);
if
(
LowDist
==
16
)
{
lowDistRepCount
=
rLOW_DIST_REP_COUNT
-
1
;
Distance
+=
prevLowDist
;
...
...
@@ -989,16 +939,16 @@ function Unpack29(bstream, Solid) {
continue
;
}
if
(
num
<
263
)
{
const
DistNum
=
num
-
259
;
const
Distance
=
rOldDist
[
DistNum
];
var
DistNum
=
num
-
259
;
var
Distance
=
rOldDist
[
DistNum
];
for
(
let
I
=
DistNum
;
I
>
0
;
I
--
)
{
for
(
var
I
=
DistNum
;
I
>
0
;
I
--
)
{
rOldDist
[
I
]
=
rOldDist
[
I
-
1
];
}
rOldDist
[
0
]
=
Distance
;
const
LengthNumber
=
RarDecodeNumber
(
bstream
,
RD
);
let
Length
=
rLDecode
[
LengthNumber
]
+
2
;
var
LengthNumber
=
RarDecodeNumber
(
bstream
,
RD
);
var
Length
=
rLDecode
[
LengthNumber
]
+
2
;
if
((
Bits
=
rLBits
[
LengthNumber
])
>
0
)
{
Length
+=
bstream
.
readBits
(
Bits
);
}
...
...
@@ -1007,7 +957,7 @@ function Unpack29(bstream, Solid) {
continue
;
}
if
(
num
<
272
)
{
let
Distance
=
rSDDecode
[
num
-=
263
]
+
1
;
var
Distance
=
rSDDecode
[
num
-=
263
]
+
1
;
if
((
Bits
=
rSDBits
[
num
])
>
0
)
{
Distance
+=
bstream
.
readBits
(
Bits
);
}
...
...
@@ -1026,10 +976,10 @@ function Unpack29(bstream, Solid) {
* the filters loaded into the RarVM and writes out to wBuffer.
*/
function
RarWriteBuf
()
{
let
writeSize
=
(
rBuffer
.
ptr
&
MAXWINMASK
);
var
writeSize
=
(
rBuffer
.
ptr
&
MAXWINMASK
);
for
(
let
i
=
0
;
i
<
PrgStack
.
length
;
++
i
)
{
const
flt
=
PrgStack
[
i
];
for
(
var
i
=
0
;
i
<
PrgStack
.
length
;
++
i
)
{
var
flt
=
PrgStack
[
i
];
if
(
flt
==
null
)
{
continue
;
}
...
...
@@ -1039,8 +989,8 @@ function RarWriteBuf() {
continue
;
}
const
blockStart
=
flt
.
BlockStart
;
const
blockLength
=
flt
.
BlockLength
;
var
blockStart
=
flt
.
BlockStart
;
var
blockLength
=
flt
.
BlockLength
;
// WrittenBorder = wBuffer.ptr
if
(((
blockStart
-
wBuffer
.
ptr
)
&
MAXWINMASK
)
<
writeSize
)
{
...
...
@@ -1050,17 +1000,17 @@ function RarWriteBuf() {
writeSize
=
(
rBuffer
.
ptr
-
wBuffer
.
ptr
)
&
MAXWINMASK
;
}
if
(
blockLength
<=
writeSize
)
{
const
blockEnd
=
(
blockStart
+
blockLength
)
&
MAXWINMASK
;
var
blockEnd
=
(
blockStart
+
blockLength
)
&
MAXWINMASK
;
if
(
blockStart
<
blockEnd
||
blockEnd
==
0
)
{
VM
.
setMemory
(
0
,
rBuffer
.
data
.
subarray
(
blockStart
,
blockStart
+
blockLength
),
blockLength
);
}
else
{
const
firstPartLength
=
MAXWINSIZE
-
blockStart
;
var
firstPartLength
=
MAXWINSIZE
-
blockStart
;
VM
.
setMemory
(
0
,
rBuffer
.
data
.
subarray
(
blockStart
,
blockStart
+
firstPartLength
),
firstPartLength
);
VM
.
setMemory
(
firstPartLength
,
rBuffer
.
data
,
blockEnd
);
}
const
parentPrg
=
Filters
[
flt
.
ParentFilter
].
Prg
;
const
prg
=
flt
.
Prg
;
var
parentPrg
=
Filters
[
flt
.
ParentFilter
].
Prg
;
var
prg
=
flt
.
Prg
;
if
(
parentPrg
.
GlobalData
.
length
>
VM_FIXEDGLOBALSIZE
)
{
// Copy global data from previous script execution if any.
...
...
@@ -1071,7 +1021,7 @@ function RarWriteBuf() {
if
(
prg
.
GlobalData
.
length
>
VM_FIXEDGLOBALSIZE
)
{
// Save global data for next script execution.
const
globalDataLen
=
prg
.
GlobalData
.
length
;
var
globalDataLen
=
prg
.
GlobalData
.
length
;
if
(
parentPrg
.
GlobalData
.
length
<
globalDataLen
)
{
parentPrg
.
GlobalData
=
new
Uint8Array
(
globalDataLen
);
}
...
...
@@ -1082,11 +1032,11 @@ function RarWriteBuf() {
parentPrg
.
GlobalData
=
new
Uint8Array
(
0
);
}
let
filteredData
=
prg
.
FilteredData
;
var
filteredData
=
prg
.
FilteredData
;
PrgStack
[
i
]
=
null
;
while
(
i
+
1
<
PrgStack
.
length
)
{
const
nextFilter
=
PrgStack
[
i
+
1
];
var
nextFilter
=
PrgStack
[
i
+
1
];
if
(
nextFilter
==
null
||
nextFilter
.
BlockStart
!=
blockStart
||
nextFilter
.
BlockLength
!=
filteredData
.
length
||
nextFilter
.
NextWindow
)
{
break
;
...
...
@@ -1096,29 +1046,29 @@ function RarWriteBuf() {
VM
.
setMemory
(
0
,
filteredData
,
filteredData
.
length
);
const
innerP
arentPrg
=
Filters
[
nextFilter
.
ParentFilter
].
Prg
;
const
nextPrg
=
nextFilter
.
Prg
;
var
p
arentPrg
=
Filters
[
nextFilter
.
ParentFilter
].
Prg
;
var
nextPrg
=
nextFilter
.
Prg
;
const
globalDataLen
=
innerP
arentPrg
.
GlobalData
.
length
;
var
globalDataLen
=
p
arentPrg
.
GlobalData
.
length
;
if
(
globalDataLen
>
VM_FIXEDGLOBALSIZE
)
{
// Copy global data from previous script execution if any.
nextPrg
.
GlobalData
=
new
Uint8Array
(
globalDataLen
);
nextPrg
.
GlobalData
.
set
(
innerP
arentPrg
.
GlobalData
.
subarray
(
VM_FIXEDGLOBALSIZE
,
VM_FIXEDGLOBALSIZE
+
globalDataLen
),
VM_FIXEDGLOBALSIZE
);
nextPrg
.
GlobalData
.
set
(
p
arentPrg
.
GlobalData
.
subarray
(
VM_FIXEDGLOBALSIZE
,
VM_FIXEDGLOBALSIZE
+
globalDataLen
),
VM_FIXEDGLOBALSIZE
);
}
RarExecuteCode
(
nextPrg
);
if
(
nextPrg
.
GlobalData
.
length
>
VM_GLOBALMEMSIZE
)
{
// Save global data for next script execution.
const
globalDataLen
=
nextPrg
.
GlobalData
.
length
;
if
(
innerP
arentPrg
.
GlobalData
.
length
<
globalDataLen
)
{
innerP
arentPrg
.
GlobalData
=
new
Uint8Array
(
globalDataLen
);
var
globalDataLen
=
nextPrg
.
GlobalData
.
length
;
if
(
p
arentPrg
.
GlobalData
.
length
<
globalDataLen
)
{
p
arentPrg
.
GlobalData
=
new
Uint8Array
(
globalDataLen
);
}
innerP
arentPrg
.
GlobalData
.
set
(
p
arentPrg
.
GlobalData
.
set
(
this
.
mem_
.
subarray
(
VM_FIXEDGLOBALSIZE
,
VM_FIXEDGLOBALSIZE
+
globalDataLen
),
VM_FIXEDGLOBALSIZE
);
}
else
{
innerP
arentPrg
.
GlobalData
=
new
Uint8Array
(
0
);
p
arentPrg
.
GlobalData
=
new
Uint8Array
(
0
);
}
filteredData
=
nextPrg
.
FilteredData
;
...
...
@@ -1126,22 +1076,23 @@ function RarWriteBuf() {
PrgStack
[
i
]
=
null
;
}
// while (i + 1 < PrgStack.length)
for
(
let
j
=
0
;
j
<
filteredData
.
length
;
++
j
)
{
for
(
var
j
=
0
;
j
<
filteredData
.
length
;
++
j
)
{
wBuffer
.
insertByte
(
filteredData
[
j
]);
}
writeSize
=
(
rBuffer
.
ptr
-
wBuffer
.
ptr
)
&
MAXWINMASK
;
}
// if (blockLength <= writeSize)
else
{
for
(
let
j
=
i
;
j
<
PrgStack
.
length
;
++
j
)
{
const
theF
lt
=
PrgStack
[
j
];
if
(
theFlt
!=
null
&&
theF
lt
.
NextWindow
)
{
theF
lt
.
NextWindow
=
false
;
for
(
var
j
=
i
;
j
<
PrgStack
.
length
;
++
j
)
{
var
f
lt
=
PrgStack
[
j
];
if
(
flt
!=
null
&&
f
lt
.
NextWindow
)
{
f
lt
.
NextWindow
=
false
;
}
}
//WrPtr=WrittenBorder;
return
;
}
}
// if (((blockStart - wBuffer.ptr) & MAXWINMASK) < writeSize)
}
// for (
let
i = 0; i < PrgStack.length; ++i)
}
// for (
var
i = 0; i < PrgStack.length; ++i)
// Write any remaining bytes from rBuffer to wBuffer;
RarWriteArea
(
wBuffer
.
ptr
,
rBuffer
.
ptr
);
...
...
@@ -1175,11 +1126,11 @@ function RarWriteData(offset, numBytes) {
if
(
wBuffer
.
ptr
>=
rBuffer
.
data
.
length
)
{
return
;
}
const
leftToWrite
=
rBuffer
.
data
.
length
-
wBuffer
.
ptr
;
var
leftToWrite
=
rBuffer
.
data
.
length
-
wBuffer
.
ptr
;
if
(
numBytes
>
leftToWrite
)
{
numBytes
=
leftToWrite
;
}
for
(
let
i
=
0
;
i
<
numBytes
;
++
i
)
{
for
(
var
i
=
0
;
i
<
numBytes
;
++
i
)
{
wBuffer
.
insertByte
(
rBuffer
.
data
[
offset
+
i
]);
}
}
...
...
@@ -1190,7 +1141,7 @@ function RarWriteData(offset, numBytes) {
function
RarExecuteCode
(
prg
)
{
if
(
prg
.
GlobalData
.
length
>
0
)
{
const
writtenFileSize
=
wBuffer
.
ptr
;
var
writtenFileSize
=
wBuffer
.
ptr
;
prg
.
InitR
[
6
]
=
writtenFileSize
;
VM
.
setLowEndianValue
(
prg
.
GlobalData
,
writtenFileSize
,
0x24
);
VM
.
setLowEndianValue
(
prg
.
GlobalData
,
(
writtenFileSize
>>>
32
)
>>
0
,
0x28
);
...
...
@@ -1201,8 +1152,7 @@ function RarExecuteCode(prg)
function
RarReadEndOfBlock
(
bstream
)
{
RarUpdateProgress
();
let
NewTable
=
false
;
let
NewFile
=
false
;
var
NewTable
=
false
,
NewFile
=
false
;
if
(
bstream
.
readBits
(
1
))
{
NewTable
=
true
;
}
else
{
...
...
@@ -1231,14 +1181,13 @@ function RarInsertOldDist(distance) {
* pointer to start copying from.
*/
function
RarCopyString
(
len
,
distance
)
{
let
srcPtr
=
rBuffer
.
ptr
-
distance
;
// If we need to go back to previous buffers, then seek back.
var
srcPtr
=
rBuffer
.
ptr
-
distance
;
if
(
srcPtr
<
0
)
{
let
l
=
rOldBuffers
.
length
;
var
l
=
rOldBuffers
.
length
;
while
(
srcPtr
<
0
)
{
srcPtr
=
rOldBuffers
[
--
l
].
data
.
length
+
srcPtr
;
}
// TODO: lets hope that it never needs to read
across buffer
boundaries
// TODO: lets hope that it never needs to read
beyond file
boundaries
while
(
len
--
)
{
rBuffer
.
insertByte
(
rOldBuffers
[
l
].
data
[
srcPtr
++
]);
}
...
...
@@ -1257,15 +1206,13 @@ function RarCopyString(len, distance) {
*/
function
unpack
(
v
)
{
// TODO: implement what happens when unpVer is < 15
const
Ver
=
v
.
header
.
unpVer
<=
15
?
15
:
v
.
header
.
unpVer
;
const
Solid
=
v
.
header
.
flags
.
LHD_SOLID
;
const
bstream
=
new
bitjs
.
io
.
BitStream
(
v
.
fileData
.
buffer
,
true
/* rtl */
,
v
.
fileData
.
byteOffset
,
v
.
fileData
.
byteLength
);
var
Ver
=
v
.
header
.
unpVer
<=
15
?
15
:
v
.
header
.
unpVer
;
var
Solid
=
v
.
header
.
LHD_SOLID
;
var
bstream
=
new
bitjs
.
io
.
BitStream
(
v
.
fileData
.
buffer
,
true
/* rtl */
,
v
.
fileData
.
byteOffset
,
v
.
fileData
.
byteLength
);
rBuffer
=
new
bitjs
.
io
.
ByteBuffer
(
v
.
header
.
unpackedSize
);
if
(
logToConsole
)
{
info
(
'Unpacking '
+
v
.
filename
+
' RAR v'
+
Ver
);
}
info
(
"Unpacking "
+
v
.
filename
+
" RAR v"
+
Ver
);
switch
(
Ver
)
{
case
15
:
// rar 1.5 compression
...
...
@@ -1281,179 +1228,131 @@ function unpack(v) {
Unpack29
(
bstream
,
Solid
);
break
;
}
// switch(method)
rOldBuffers
.
push
(
rBuffer
);
// TODO: clear these old buffers when there's over 4MB of history
return
rBuffer
.
data
;
}
/**
*/
class
RarLocalFile
{
/**
* @param {bitjs.io.ByteStream} bstream
*/
constructor
(
bstream
)
{
this
.
header
=
new
RarVolumeHeader
(
bstream
);
this
.
filename
=
this
.
header
.
filename
;
if
(
this
.
header
.
headType
!=
FILE_HEAD
&&
this
.
header
.
headType
!=
ENDARC_HEAD
)
{
this
.
isValid
=
false
;
info
(
'Error! RAR Volume did not include a FILE_HEAD header '
);
}
else
{
// read in the compressed data
this
.
fileData
=
null
;
if
(
this
.
header
.
packSize
>
0
)
{
this
.
fileData
=
bstream
.
readBytes
(
this
.
header
.
packSize
);
this
.
isValid
=
true
;
}
}
// bstream is a bit stream
var
RarLocalFile
=
function
(
bstream
)
{
this
.
header
=
new
RarVolumeHeader
(
bstream
);
this
.
filename
=
this
.
header
.
filename
;
if
(
this
.
header
.
headType
!=
FILE_HEAD
&&
this
.
header
.
headType
!=
ENDARC_HEAD
)
{
this
.
isValid
=
false
;
info
(
"Error! RAR Volume did not include a FILE_HEAD header "
);
}
unrar
()
{
if
(
!
this
.
header
.
flags
.
LHD_SPLIT_BEFORE
)
{
// unstore file
if
(
this
.
header
.
method
==
0x30
)
{
if
(
logToConsole
)
{
info
(
'Unstore '
+
this
.
filename
);
}
this
.
isValid
=
true
;
currentBytesUnarchivedInFile
+=
this
.
fileData
.
length
;
currentBytesUnarchived
+=
this
.
fileData
.
length
;
// Create a new buffer and copy it over.
const
len
=
this
.
header
.
packSize
;
const
newBuffer
=
new
bitjs
.
io
.
ByteBuffer
(
len
);
newBuffer
.
insertBytes
(
this
.
fileData
);
this
.
fileData
=
newBuffer
.
data
;
}
else
{
this
.
isValid
=
true
;
this
.
fileData
=
unpack
(
this
);
}
else
{
// read in the compressed data
this
.
fileData
=
null
;
if
(
this
.
header
.
packSize
>
0
)
{
this
.
fileData
=
bstream
.
readBytes
(
this
.
header
.
packSize
);
this
.
isValid
=
true
;
}
}
}
// Reads in the volume and main header.
function
unrar_start
()
{
let
bstream
=
bytestream
.
tee
();
const
header
=
new
RarVolumeHeader
(
bstream
);
if
(
header
.
crc
==
0x6152
&&
header
.
headType
==
0x72
&&
header
.
flags
.
value
==
0x1A21
&&
header
.
headSize
==
7
)
{
if
(
logToConsole
)
{
info
(
'Found RAR signature'
);
}
};
const
mhead
=
new
RarVolumeHeader
(
bstream
);
if
(
mhead
.
headType
!=
MAIN_HEAD
)
{
info
(
'Error! RAR did not include a MAIN_HEAD header'
);
RarLocalFile
.
prototype
.
unrar
=
function
()
{
if
(
!
this
.
header
.
flags
.
LHD_SPLIT_BEFORE
)
{
// unstore file
if
(
this
.
header
.
method
==
0x30
)
{
info
(
"Unstore "
+
this
.
filename
);
this
.
isValid
=
true
;
currentBytesUnarchivedInFile
+=
this
.
fileData
.
length
;
currentBytesUnarchived
+=
this
.
fileData
.
length
;
// Create a new buffer and copy it over.
var
len
=
this
.
header
.
packSize
;
var
newBuffer
=
new
bitjs
.
io
.
ByteBuffer
(
len
);
newBuffer
.
insertBytes
(
this
.
fileData
);
this
.
fileData
=
newBuffer
.
data
;
}
else
{
bytestream
=
bstream
.
tee
();
this
.
isValid
=
true
;
this
.
fileData
=
unpack
(
this
);
}
}
}
function
unrar
()
{
let
bstream
=
bytestream
.
tee
();
var
unrar
=
function
(
arrayBuffer
)
{
currentFilename
=
""
;
currentFileNumber
=
0
;
currentBytesUnarchivedInFile
=
0
;
currentBytesUnarchived
=
0
;
totalUncompressedBytesInArchive
=
0
;
totalFilesInArchive
=
0
;
let
localFile
=
null
;
do
{
localFile
=
new
RarLocalFile
(
bstream
);
if
(
logToConsole
)
{
info
(
'RAR localFile isValid='
+
localFile
.
isValid
+
', volume packSize='
+
localFile
.
header
.
packSize
);
localFile
.
header
.
dump
();
}
if
(
localFile
&&
localFile
.
isValid
&&
localFile
.
header
.
packSize
>
0
)
{
bytestream
=
bstream
.
tee
();
totalUncompressedBytesInArchive
+=
localFile
.
header
.
unpackedSize
;
allLocalFiles
.
push
(
localFile
);
currentFilename
=
localFile
.
header
.
filename
;
currentBytesUnarchivedInFile
=
0
;
localFile
.
unrar
();
if
(
localFile
.
isValid
)
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveExtractEvent
(
localFile
));
postProgress
();
}
}
else
if
(
localFile
.
header
.
packSize
==
0
&&
localFile
.
header
.
unpackedSize
==
0
)
{
// Skip this file.
localFile
.
isValid
=
true
;
}
}
while
(
localFile
.
isValid
&&
bstream
.
getNumBytesLeft
()
>
0
);
totalFilesInArchive
=
allLocalFiles
.
length
;
postMessage
(
new
bitjs
.
archive
.
UnarchiveStartEvent
());
var
bstream
=
new
bitjs
.
io
.
BitStream
(
arrayBuffer
,
false
/* rtl */
);
postProgress
();
bytestream
=
bstream
.
tee
();
};
// event.data.file has the first ArrayBuffer.
// event.data.bytes has all subsequent ArrayBuffers.
onmessage
=
function
(
event
)
{
const
bytes
=
event
.
data
.
file
||
event
.
data
.
bytes
;
logToConsole
=
!!
event
.
data
.
logToConsole
;
// This is the very first time we have been called. Initialize the bytestream.
if
(
!
bytestream
)
{
bytestream
=
new
bitjs
.
io
.
ByteStream
(
bytes
);
currentFilename
=
''
;
currentFileNumber
=
0
;
currentBytesUnarchivedInFile
=
0
;
currentBytesUnarchived
=
0
;
totalUncompressedBytesInArchive
=
0
;
totalFilesInArchive
=
0
;
allLocalFiles
=
[];
postMessage
(
new
bitjs
.
archive
.
UnarchiveStartEvent
());
}
else
{
bytestream
.
push
(
bytes
);
}
var
header
=
new
RarVolumeHeader
(
bstream
);
if
(
header
.
crc
==
0x6152
&&
header
.
headType
==
0x72
&&
header
.
flags
.
value
==
0x1A21
&&
header
.
headSize
==
7
)
{
info
(
"Found RAR signature"
);
if
(
unarchiveState
===
UnarchiveState
.
NOT_STARTED
)
{
try
{
unrar_start
();
unarchiveState
=
UnarchiveState
.
UNARCHIVING
;
}
catch
(
e
)
{
if
(
typeof
e
===
'string'
&&
e
.
startsWith
(
'Error! Overflowed'
))
{
if
(
logToConsole
)
{
console
.
dir
(
e
);
}
// Overrun the buffer.
unarchiveState
=
UnarchiveState
.
WAITING
;
postProgress
();
}
else
{
console
.
error
(
'Found an error while unrarring'
);
console
.
dir
(
e
);
throw
e
;
}
var
mhead
=
new
RarVolumeHeader
(
bstream
);
if
(
mhead
.
headType
!=
MAIN_HEAD
)
{
info
(
"Error! RAR did not include a MAIN_HEAD header"
);
}
}
if
(
unarchiveState
===
UnarchiveState
.
UNARCHIVING
||
unarchiveState
===
UnarchiveState
.
WAITING
)
{
try
{
unrar
();
unarchiveState
=
UnarchiveState
.
FINISHED
;
postMessage
(
new
bitjs
.
archive
.
UnarchiveFinishEvent
());
}
catch
(
e
)
{
if
(
typeof
e
===
'string'
&&
e
.
startsWith
(
'Error! Overflowed'
))
{
if
(
logToConsole
)
{
console
.
dir
(
e
);
else
{
var
localFiles
=
[],
localFile
=
null
;
do
{
try
{
localFile
=
new
RarLocalFile
(
bstream
);
info
(
"RAR localFile isValid="
+
localFile
.
isValid
+
", volume packSize="
+
localFile
.
header
.
packSize
);
if
(
localFile
&&
localFile
.
isValid
&&
localFile
.
header
.
packSize
>
0
)
{
totalUncompressedBytesInArchive
+=
localFile
.
header
.
unpackedSize
;
localFiles
.
push
(
localFile
);
}
else
if
(
localFile
.
header
.
packSize
==
0
&&
localFile
.
header
.
unpackedSize
==
0
)
{
localFile
.
isValid
=
true
;
}
}
catch
(
err
)
{
break
;
}
//info("bstream" + bstream.bytePtr+"/"+bstream.bytes.length);
}
while
(
localFile
.
isValid
);
totalFilesInArchive
=
localFiles
.
length
;
// now we have all information but things are unpacked
// TODO: unpack
localFiles
=
localFiles
.
sort
(
function
(
a
,
b
)
{
var
aname
=
a
.
filename
.
toLowerCase
();
var
bname
=
b
.
filename
.
toLowerCase
();
return
aname
>
bname
?
1
:
-
1
;
});
info
(
localFiles
.
map
(
function
(
a
){
return
a
.
filename
}).
join
(
', '
));
for
(
var
i
=
0
;
i
<
localFiles
.
length
;
++
i
)
{
var
localfile
=
localFiles
[
i
];
// update progress
currentFilename
=
localfile
.
header
.
filename
;
currentBytesUnarchivedInFile
=
0
;
// actually do the unzipping
localfile
.
unrar
();
if
(
localfile
.
isValid
)
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveExtractEvent
(
localfile
));
postProgress
();
}
// Overrun the buffer.
unarchiveState
=
UnarchiveState
.
WAITING
;
}
else
{
console
.
error
(
'Found an error while unrarring'
);
console
.
dir
(
e
);
throw
e
;
}
postProgress
();
}
}
else
{
err
(
"Invalid RAR file"
);
}
postMessage
(
new
bitjs
.
archive
.
UnarchiveFinishEvent
());
};
// event.data.file has the ArrayBuffer.
onmessage
=
function
(
event
)
{
var
ab
=
event
.
data
.
file
;
unrar
(
ab
,
true
);
};
cps/static/js/archive/untar.js
View file @
7982ed87
...
...
@@ -14,193 +14,177 @@
importScripts
(
'../io/bytestream.js'
);
importScripts
(
'archive.js'
);
const
UnarchiveState
=
{
NOT_STARTED
:
0
,
UNARCHIVING
:
1
,
WAITING
:
2
,
FINISHED
:
3
,
};
// State - consider putting these into a class.
let
unarchiveState
=
UnarchiveState
.
NOT_STARTED
;
let
bytestream
=
null
;
let
allLocalFiles
=
null
;
let
logToConsole
=
false
;
// Progress variables.
let
currentFilename
=
""
;
let
currentFileNumber
=
0
;
let
currentBytesUnarchivedInFile
=
0
;
let
currentBytesUnarchived
=
0
;
let
totalUncompressedBytesInArchive
=
0
;
let
totalFilesInArchive
=
0
;
var
currentFilename
=
""
;
var
currentFileNumber
=
0
;
var
currentBytesUnarchivedInFile
=
0
;
var
currentBytesUnarchived
=
0
;
var
totalUncompressedBytesInArchive
=
0
;
var
totalFilesInArchive
=
0
;
// Helper functions.
const
info
=
function
(
str
)
{
var
info
=
function
(
str
)
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveInfoEvent
(
str
));
};
const
err
=
function
(
str
)
{
var
err
=
function
(
str
)
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveErrorEvent
(
str
));
};
const
postProgress
=
function
()
{
var
postProgress
=
function
()
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveProgressEvent
(
currentFilename
,
currentFileNumber
,
currentBytesUnarchivedInFile
,
currentBytesUnarchived
,
totalUncompressedBytesInArchive
,
totalFilesInArchive
,
bytestream
.
getNumBytesRead
(),
));
totalFilesInArchive
));
};
// Removes all characters from the first zero-byte in the string onwards.
const
readCleanString
=
function
(
bstr
,
numBytes
)
{
const
str
=
bstr
.
readString
(
numBytes
);
const
zIndex
=
str
.
indexOf
(
String
.
fromCharCode
(
0
));
var
readCleanString
=
function
(
bstr
,
numBytes
)
{
var
str
=
bstr
.
readString
(
numBytes
);
var
zIndex
=
str
.
indexOf
(
String
.
fromCharCode
(
0
));
return
zIndex
!=
-
1
?
str
.
substr
(
0
,
zIndex
)
:
str
;
};
// takes a ByteStream and parses out the local file information
var TarLocalFile = function(bstream) {
    this.isValid = false;

    // Read in the header block
    this.name = readCleanString(bstream, 100);
    this.mode = readCleanString(bstream, 8);
    this.uid = readCleanString(bstream, 8);
    this.gid = readCleanString(bstream, 8);
    this.size = parseInt(readCleanString(bstream, 12), 8);
    this.mtime = readCleanString(bstream, 12);
    this.chksum = readCleanString(bstream, 8);
    this.typeflag = readCleanString(bstream, 1);
    this.linkname = readCleanString(bstream, 100);
    this.maybeMagic = readCleanString(bstream, 6);

    if (this.maybeMagic == "ustar") {
        this.version = readCleanString(bstream, 2);
        this.uname = readCleanString(bstream, 32);
        this.gname = readCleanString(bstream, 32);
        this.devmajor = readCleanString(bstream, 8);
        this.devminor = readCleanString(bstream, 8);
        this.prefix = readCleanString(bstream, 155);

        if (this.prefix.length) {
            this.name = this.prefix + this.name;
        }
        bstream.readBytes(12); // 512 - 500
    } else {
        bstream.readBytes(255); // 512 - 257
    }

    // Done header, now rest of blocks are the file contents.
    this.filename = this.name;
    this.fileData = null;

    info("Untarring file '" + this.filename + "'");
    info("  size = " + this.size);
    info("  typeflag = " + this.typeflag);

    // A regular file.
    if (this.typeflag == 0) {
        info("  This is a regular file.");
        var sizeInBytes = parseInt(this.size);
        this.fileData = new Uint8Array(bstream.bytes.buffer, bstream.ptr, this.size);
        if (this.name.length > 0 && this.size > 0 && this.fileData && this.fileData.buffer) {
            this.isValid = true;
        }

        bstream.readBytes(this.size);

        // Round up to 512-byte blocks.
        var remaining = 512 - bstream.ptr % 512;
        if (remaining > 0 && remaining < 512) {
            bstream.readBytes(remaining);
        }
    } else if (this.typeflag == 5) {
        info("  This is a directory.");
    }
};
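// The header fields above are fixed-width ASCII slots inside each 512-byte tar
// header block (100 bytes for the name, 8 for the mode, 12 for the size, and so
// on), and numeric fields such as the size are stored as octal strings. A minimal
// illustrative sketch of that conversion, using a made-up header slice rather than
// bytes from a real archive:
var exampleSizeField = "00000001750\u0000";        // hypothetical 12-byte size field
var exampleCleaned = exampleSizeField.split("\u0000")[0]; // same effect as readCleanString()
var exampleSize = parseInt(exampleCleaned, 8);     // octal 1750 -> 1000 bytes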
// Takes an ArrayBuffer of a tar file in
// returns null on error
// returns an array of DecompressedFile objects on success
var untar = function(arrayBuffer) {
    currentFilename = "";
    currentFileNumber = 0;
    currentBytesUnarchivedInFile = 0;
    currentBytesUnarchived = 0;
    totalUncompressedBytesInArchive = 0;
    totalFilesInArchive = 0;

    postMessage(new bitjs.archive.UnarchiveStartEvent());
    var bstream = new bitjs.io.ByteStream(arrayBuffer);
    var localFiles = [];

    // While we don't encounter an empty block, keep making TarLocalFiles.
    while (bstream.peekNumber(4) != 0) {
        var oneLocalFile = new TarLocalFile(bstream);
        if (oneLocalFile && oneLocalFile.isValid) {
            localFiles.push(oneLocalFile);
            totalUncompressedBytesInArchive += oneLocalFile.size;
        }
    }
    totalFilesInArchive = localFiles.length;

    // got all local files, now sort them
    localFiles.sort(function(a, b) {
        var aname = a.filename;
        var bname = b.filename;
        return aname > bname ? 1 : -1;

        // extract the number at the end of both filenames
        /*
        var aname = a.filename;
        var bname = b.filename;
        var aindex = aname.length, bindex = bname.length;

        // Find the last number character from the back of the filename.
        while (aname[aindex-1] < '0' || aname[aindex-1] > '9') --aindex;
        while (bname[bindex-1] < '0' || bname[bindex-1] > '9') --bindex;

        // Find the first number character from the back of the filename
        while (aname[aindex-1] >= '0' && aname[aindex-1] <= '9') --aindex;
        while (bname[bindex-1] >= '0' && bname[bindex-1] <= '9') --bindex;

        // parse them into numbers and return comparison
        var anum = parseInt(aname.substr(aindex), 10),
            bnum = parseInt(bname.substr(bindex), 10);
        return anum - bnum;
        */
    });

    // report # files and total length
    if (localFiles.length > 0) {
        postProgress();
    }

    // now do the shipping of each file
    for (var i = 0; i < localFiles.length; ++i) {
        var localfile = localFiles[i];
        info("Sending file '" + localfile.filename + "' up");

        // update progress
        currentFilename = localfile.filename;
        currentFileNumber = i;
        currentBytesUnarchivedInFile = localfile.size;
        currentBytesUnarchived += localfile.size;
        postMessage(new bitjs.archive.UnarchiveExtractEvent(localfile));
        postProgress();
    }

    postProgress();
    postMessage(new bitjs.archive.UnarchiveFinishEvent());
};

// event.data.file has the ArrayBuffer.
onmessage = function(event) {
    var ab = event.data.file;
    untar(ab);
};
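// This worker is driven entirely through postMessage: the host page sends an
// ArrayBuffer in event.data.file and listens for the bitjs.archive.Unarchive*Event
// objects posted back. A rough host-side sketch, assuming the worker script is
// served as "untar.js" and "example.tar" is a plain tarball (both names are
// placeholders, not part of this project):
//
//   var worker = new Worker("untar.js");
//   worker.onmessage = function(e) {
//       console.log("unarchive event:", e.data);
//   };
//   fetch("example.tar")
//       .then(function(resp) { return resp.arrayBuffer(); })
//       .then(function(ab) { worker.postMessage({ file: ab }); });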
cps/static/js/archive/unzip.js
@@ -18,65 +18,42 @@ importScripts('../io/bytebuffer.js');
importScripts('../io/bytestream.js');
importScripts('archive.js');

// State - consider putting these into a class.
var currentFilename = "";
var currentFileNumber = 0;
var currentBytesUnarchivedInFile = 0;
var currentBytesUnarchived = 0;
var totalUncompressedBytesInArchive = 0;
var totalFilesInArchive = 0;

// Helper functions.
var info = function(str) {
    postMessage(new bitjs.archive.UnarchiveInfoEvent(str));
};
var err = function(str) {
    postMessage(new bitjs.archive.UnarchiveErrorEvent(str));
};
var postProgress = function() {
    postMessage(new bitjs.archive.UnarchiveProgressEvent(
        currentFilename,
        currentFileNumber,
        currentBytesUnarchivedInFile,
        currentBytesUnarchived,
        totalUncompressedBytesInArchive,
        totalFilesInArchive));
};

var zLocalFileHeaderSignature = 0x04034b50;
var zArchiveExtraDataSignature = 0x08064b50;
var zCentralFileHeaderSignature = 0x02014b50;
var zDigitalSignatureSignature = 0x05054b50;
var zEndOfCentralDirSignature = 0x06064b50;
var zEndOfCentralDirLocatorSignature = 0x07064b50;

// takes a ByteStream and parses out the local file information
var ZipLocalFile = function(bstream) {
    if (typeof bstream != typeof {} || !bstream.readNumber || typeof bstream.readNumber != typeof function(){}) {
        return null;
    }

    bstream.readNumber(4); // swallow signature
@@ -93,69 +70,197 @@ class ZipLocalFile {
    this.filename = null;
    if (this.fileNameLength > 0) {
        this.filename = bstream.readString(this.fileNameLength);
    }

    info("Zip Local File Header:");
    info(" version=" + this.version);
    info(" general purpose=" + this.generalPurpose);
    info(" compression method=" + this.compressionMethod);
    info(" last mod file time=" + this.lastModFileTime);
    info(" last mod file date=" + this.lastModFileDate);
    info(" crc32=" + this.crc32);
    info(" compressed size=" + this.compressedSize);
    info(" uncompressed size=" + this.uncompressedSize);
    info(" file name length=" + this.fileNameLength);
    info(" extra field length=" + this.extraFieldLength);
    info(" filename = '" + this.filename + "'");

    this.extraField = null;
    if (this.extraFieldLength > 0) {
        this.extraField = bstream.readString(this.extraFieldLength);
        info(" extra field=" + this.extraField);
    }

    // read in the compressed data
    this.fileData = null;
    if (this.compressedSize > 0) {
        this.fileData = new Uint8Array(bstream.bytes.buffer, bstream.ptr, this.compressedSize);
        bstream.ptr += this.compressedSize;
    }

    // TODO: deal with data descriptor if present (we currently assume no data descriptor!)
    // "This descriptor exists only if bit 3 of the general purpose bit flag is set"
    // But how do you figure out how big the file data is if you don't know the compressedSize
    // from the header?!?
    if ((this.generalPurpose & bitjs.BIT[3]) != 0) {
        this.crc32 = bstream.readNumber(4);
        this.compressedSize = bstream.readNumber(4);
        this.uncompressedSize = bstream.readNumber(4);
    }
};

// determine what kind of compressed data we have and decompress
ZipLocalFile.prototype.unzip = function() {
    // Zip Version 1.0, no compression (store only)
    if (this.compressionMethod == 0) {
        info("ZIP v" + this.version + ", store only: " + this.filename + " (" + this.compressedSize + " bytes)");
        currentBytesUnarchivedInFile = this.compressedSize;
        currentBytesUnarchived += this.compressedSize;
    }
    // version == 20, compression method == 8 (DEFLATE)
    else if (this.compressionMethod == 8) {
        info("ZIP v2.0, DEFLATE: " + this.filename + " (" + this.compressedSize + " bytes)");
        this.fileData = inflate(this.fileData, this.uncompressedSize);
    }
    else {
        err("UNSUPPORTED VERSION/FORMAT: ZIP v" + this.version + ", compression method=" + this.compressionMethod + ": " + this.filename + " (" + this.compressedSize + " bytes)");
        this.fileData = null;
    }
};

// Takes an ArrayBuffer of a zip file in
// returns null on error
// returns an array of DecompressedFile objects on success
var unzip = function(arrayBuffer) {
    postMessage(new bitjs.archive.UnarchiveStartEvent());

    currentFilename = "";
    currentFileNumber = 0;
    currentBytesUnarchivedInFile = 0;
    currentBytesUnarchived = 0;
    totalUncompressedBytesInArchive = 0;
    totalFilesInArchive = 0;
    currentBytesUnarchived = 0;

    var bstream = new bitjs.io.ByteStream(arrayBuffer);
    // detect local file header signature or return null
    if (bstream.peekNumber(4) == zLocalFileHeaderSignature) {
        var localFiles = [];
        // loop until we don't see any more local files
        while (bstream.peekNumber(4) == zLocalFileHeaderSignature) {
            var oneLocalFile = new ZipLocalFile(bstream);
            // this should strip out directories/folders
            if (oneLocalFile && oneLocalFile.uncompressedSize > 0 && oneLocalFile.fileData) {
                localFiles.push(oneLocalFile);
                totalUncompressedBytesInArchive += oneLocalFile.uncompressedSize;
            }
        }
        totalFilesInArchive = localFiles.length;

        // got all local files, now sort them
        localFiles.sort(function(a, b) {
            var aname = a.filename;
            var bname = b.filename;
            return aname > bname ? 1 : -1;

            // extract the number at the end of both filenames
            /*
            var aname = a.filename;
            var bname = b.filename;
            var aindex = aname.length, bindex = bname.length;

            // Find the last number character from the back of the filename.
            while (aname[aindex-1] < '0' || aname[aindex-1] > '9') --aindex;
            while (bname[bindex-1] < '0' || bname[bindex-1] > '9') --bindex;

            // Find the first number character from the back of the filename
            while (aname[aindex-1] >= '0' && aname[aindex-1] <= '9') --aindex;
            while (bname[bindex-1] >= '0' && bname[bindex-1] <= '9') --bindex;

            // parse them into numbers and return comparison
            var anum = parseInt(aname.substr(aindex), 10),
                bnum = parseInt(bname.substr(bindex), 10);
            return anum - bnum;
            */
        });

        // archive extra data record
        if (bstream.peekNumber(4) == zArchiveExtraDataSignature) {
            info(" Found an Archive Extra Data Signature");

            // skipping this record for now
            bstream.readNumber(4);
            var archiveExtraFieldLength = bstream.readNumber(4);
            bstream.readString(archiveExtraFieldLength);
        }

        // central directory structure
        // TODO: handle the rest of the structures (Zip64 stuff)
        if (bstream.peekNumber(4) == zCentralFileHeaderSignature) {
            info(" Found a Central File Header");

            // read all file headers
            while (bstream.peekNumber(4) == zCentralFileHeaderSignature) {
                bstream.readNumber(4); // signature
                bstream.readNumber(2); // version made by
                bstream.readNumber(2); // version needed to extract
                bstream.readNumber(2); // general purpose bit flag
                bstream.readNumber(2); // compression method
                bstream.readNumber(2); // last mod file time
                bstream.readNumber(2); // last mod file date
                bstream.readNumber(4); // crc32
                bstream.readNumber(4); // compressed size
                bstream.readNumber(4); // uncompressed size
                var fileNameLength = bstream.readNumber(2); // file name length
                var extraFieldLength = bstream.readNumber(2); // extra field length
                var fileCommentLength = bstream.readNumber(2); // file comment length
                bstream.readNumber(2); // disk number start
                bstream.readNumber(2); // internal file attributes
                bstream.readNumber(4); // external file attributes
                bstream.readNumber(4); // relative offset of local header
                bstream.readString(fileNameLength); // file name
                bstream.readString(extraFieldLength); // extra field
                bstream.readString(fileCommentLength); // file comment
            }
        }

        // digital signature
        if (bstream.peekNumber(4) == zDigitalSignatureSignature) {
            info(" Found a Digital Signature");

            bstream.readNumber(4);
            var sizeOfSignature = bstream.readNumber(2);
            bstream.readString(sizeOfSignature); // digital signature data
        }

        // report # files and total length
        if (localFiles.length > 0) {
            postProgress();
        }

        // now do the unzipping of each file
        for (var i = 0; i < localFiles.length; ++i) {
            var localfile = localFiles[i];

            // update progress
            currentFilename = localfile.filename;
            currentFileNumber = i;
            currentBytesUnarchivedInFile = 0;

            // actually do the unzipping
            localfile.unzip();

            if (localfile.fileData != null) {
                postMessage(new bitjs.archive.UnarchiveExtractEvent(localfile));
                postProgress();
            }
        }
        postProgress();
        postMessage(new bitjs.archive.UnarchiveFinishEvent());
    }
};
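// The z*Signature constants above are the little-endian magic numbers from the
// ZIP specification (0x04034b50 is "PK\x03\x04", the local file header), which is
// what peekNumber(4) is compared against. A standalone sketch of the same check
// using a DataView, independent of the ByteStream helper:
function looksLikeZip(arrayBuffer) {
    // Read the first four bytes as a little-endian uint32 and compare against
    // the local file header signature.
    if (arrayBuffer.byteLength < 4) return false;
    return new DataView(arrayBuffer).getUint32(0, true) === zLocalFileHeaderSignature;
}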
@@ -163,57 +268,57 @@ class ZipLocalFile {
// each entry's index is its code and its value is a JavaScript object
// containing {length: 6, symbol: X}
function getHuffmanCodes(bitLengths) {
    // ensure bitLengths is an array containing at least one element
    if (typeof bitLengths != typeof [] || bitLengths.length < 1) {
        err("Error! getHuffmanCodes() called with an invalid array");
        return null;
    }

    // Reference: http://tools.ietf.org/html/rfc1951#page-8
    var numLengths = bitLengths.length,
        bl_count = [],
        MAX_BITS = 1;

    // Step 1: count up how many codes of each length we have
    for (var i = 0; i < numLengths; ++i) {
        var length = bitLengths[i];
        // test to ensure each bit length is a positive, non-zero number
        if (typeof length != typeof 1 || length < 0) {
            err("bitLengths contained an invalid number in getHuffmanCodes(): " + length + " of type " + (typeof length));
            return null;
        }
        // increment the appropriate bitlength count
        if (bl_count[length] == undefined) bl_count[length] = 0;
        // a length of zero means this symbol is not participating in the huffman coding
        if (length > 0) bl_count[length]++;

        if (length > MAX_BITS) MAX_BITS = length;
    }

    // Step 2: Find the numerical value of the smallest code for each code length
    var next_code = [],
        code = 0;
    for (var bits = 1; bits <= MAX_BITS; ++bits) {
        var length = bits - 1;
        // ensure undefined lengths are zero
        if (bl_count[length] == undefined) bl_count[length] = 0;
        code = (code + bl_count[bits - 1]) << 1;
        next_code[bits] = code;
    }

    // Step 3: Assign numerical values to all codes
    var table = {}, tableLength = 0;
    for (var n = 0; n < numLengths; ++n) {
        var len = bitLengths[n];
        if (len != 0) {
            table[next_code[len]] = { length: len, symbol: n }; //, bitstring: binaryValueToString(next_code[len],len) };
            tableLength++;
            next_code[len]++;
        }
    }
    table.maxLength = tableLength;

    return table;
}
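// Steps 2 and 3 above implement the canonical-code construction from RFC 1951
// section 3.2.2, so the function can be sanity-checked against the example in the
// RFC: bit lengths (2, 1, 3, 3) for symbols 0..3 should yield the codes 10, 0,
// 110 and 111. A small sketch (the expected values are shown in the comments):
var exampleTable = getHuffmanCodes([2, 1, 3, 3]);
// exampleTable[parseInt("10", 2)]  -> { length: 2, symbol: 0 }
// exampleTable[parseInt("0", 2)]   -> { length: 1, symbol: 1 }
// exampleTable[parseInt("110", 2)] -> { length: 3, symbol: 2 }
// exampleTable[parseInt("111", 2)] -> { length: 3, symbol: 3 }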
/*
@@ -233,16 +338,16 @@ function getHuffmanCodes(bitLengths) {
                 11000111
*/

// fixed Huffman codes go from 7-9 bits, so we need an array whose index can hold up to 9 bits
var fixedHCtoLiteral = null;
var fixedHCtoDistance = null;

function getFixedLiteralTable() {
    // create once
    if (!fixedHCtoLiteral) {
        var bitlengths = new Array(288);
        for (var i = 0; i <= 143; ++i) bitlengths[i] = 8;
        for (i = 144; i <= 255; ++i) bitlengths[i] = 9;
        for (i = 256; i <= 279; ++i) bitlengths[i] = 7;
        for (i = 280; i <= 287; ++i) bitlengths[i] = 8;

        // get huffman code table
        fixedHCtoLiteral = getHuffmanCodes(bitlengths);
@@ -251,89 +356,86 @@ function getFixedLiteralTable() {
}
getFixedDistanceTable
()
{
// create once
if
(
!
fixedHCtoDistance
)
{
const
bitlengths
=
new
Array
(
32
);
for
(
let
i
=
0
;
i
<
32
;
++
i
)
{
bitlengths
[
i
]
=
5
;
}
// create once
if
(
!
fixedHCtoDistance
)
{
var
bitlengths
=
new
Array
(
32
);
for
(
var
i
=
0
;
i
<
32
;
++
i
)
{
bitlengths
[
i
]
=
5
;
}
// get huffman code table
fixedHCtoDistance
=
getHuffmanCodes
(
bitlengths
);
}
return
fixedHCtoDistance
;
// get huffman code table
fixedHCtoDistance
=
getHuffmanCodes
(
bitlengths
);
}
return
fixedHCtoDistance
;
}
// extract one bit at a time until we find a matching Huffman Code
// then return that symbol
function
decodeSymbol
(
bstream
,
hcTable
)
{
let
code
=
0
;
let
len
=
0
;
let
match
=
false
;
// loop until we match
for
(;;)
{
// read in next bit
const
bit
=
bstream
.
readBits
(
1
)
;
code
=
(
code
<<
1
)
|
bit
;
++
len
;
// check against Huffman Code table and break if found
if
(
hcTable
.
hasOwnProperty
(
code
)
&&
hcTable
[
code
].
length
==
len
)
{
break
;
}
if
(
len
>
hcTable
.
maxLength
)
{
err
(
"Bit stream out of sync, didn't find a Huffman Code, length was "
+
len
+
" and table only max code length of "
+
hcTable
.
maxLength
)
;
break
;
var
code
=
0
,
len
=
0
;
var
match
=
false
;
// loop until we match
for
(;;)
{
// read in next bit
var
bit
=
bstream
.
readBits
(
1
);
code
=
(
code
<<
1
)
|
bit
;
++
len
;
// check against Huffman Code table and break if found
if
(
hcTable
.
hasOwnProperty
(
code
)
&&
hcTable
[
code
].
length
==
len
)
{
break
;
}
if
(
len
>
hcTable
.
maxLength
)
{
err
(
"Bit stream out of sync, didn't find a Huffman Code, length was "
+
len
+
" and table only max code length of "
+
hcTable
.
maxLength
);
break
;
}
}
}
return
hcTable
[
code
].
symbol
;
return
hcTable
[
code
].
symbol
;
}
const
CodeLengthCodeOrder
=
[
16
,
17
,
18
,
0
,
8
,
7
,
9
,
6
,
10
,
5
,
11
,
4
,
12
,
3
,
13
,
2
,
14
,
1
,
15
];
/*
Extra Extra Extra
Code Bits Length(s) Code Bits Lengths Code Bits Length(s)
---- ---- ------ ---- ---- ------- ---- ---- -------
257 0 3 267 1 15,16 277 4 67-82
258 0 4 268 1 17,18 278 4 83-98
259 0 5 269 2 19-22 279 4 99-114
260 0 6 270 2 23-26 280 4 115-130
261 0 7 271 2 27-30 281 5 131-162
262 0 8 272 2 31-34 282 5 163-194
263 0 9 273 3 35-42 283 5 195-226
264 0 10 274 3 43-50 284 5 227-257
265 1 11,12 275 3 51-58 285 0 258
266 1 13,14 276 3 59-66
*/
const
LengthLookupTable
=
[
[
0
,
3
],
[
0
,
4
],
[
0
,
5
],
[
0
,
6
],
[
0
,
7
],
[
0
,
8
],
[
0
,
9
],
[
0
,
10
],
[
1
,
11
],
[
1
,
13
],
[
1
,
15
],
[
1
,
17
],
[
2
,
19
],
[
2
,
23
],
[
2
,
27
],
[
2
,
31
],
[
3
,
35
],
[
3
,
43
],
[
3
,
51
],
[
3
,
59
],
[
4
,
67
],
[
4
,
83
],
[
4
,
99
],
[
4
,
115
],
[
5
,
131
],
[
5
,
163
],
[
5
,
195
],
[
5
,
227
],
[
0
,
258
]
var
CodeLengthCodeOrder
=
[
16
,
17
,
18
,
0
,
8
,
7
,
9
,
6
,
10
,
5
,
11
,
4
,
12
,
3
,
13
,
2
,
14
,
1
,
15
];
/*
Extra Extra Extra
Code Bits Length(s) Code Bits Lengths Code Bits Length(s)
---- ---- ------ ---- ---- ------- ---- ---- -------
257 0 3 267 1 15,16 277 4 67-82
258 0 4 268 1 17,18 278 4 83-98
259 0 5 269 2 19-22 279 4 99-114
260 0 6 270 2 23-26 280 4 115-130
261 0 7 271 2 27-30 281 5 131-162
262 0 8 272 2 31-34 282 5 163-194
263 0 9 273 3 35-42 283 5 195-226
264 0 10 274 3 43-50 284 5 227-257
265 1 11,12 275 3 51-58 285 0 258
266 1 13,14 276 3 59-66
*/
var
LengthLookupTable
=
[
[
0
,
3
],
[
0
,
4
],
[
0
,
5
],
[
0
,
6
],
[
0
,
7
],
[
0
,
8
],
[
0
,
9
],
[
0
,
10
],
[
1
,
11
],
[
1
,
13
],
[
1
,
15
],
[
1
,
17
],
[
2
,
19
],
[
2
,
23
],
[
2
,
27
],
[
2
,
31
],
[
3
,
35
],
[
3
,
43
],
[
3
,
51
],
[
3
,
59
],
[
4
,
67
],
[
4
,
83
],
[
4
,
99
],
[
4
,
115
],
[
5
,
131
],
[
5
,
163
],
[
5
,
195
],
[
5
,
227
],
[
0
,
258
]
];
/*
Extra Extra Extra
Code Bits Dist Code Bits Dist Code Bits Distance
---- ---- ---- ---- ---- ------ ---- ---- --------
0 0 1 10 4 33-48 20 9 1025-1536
1 0 2 11 4 49-64 21 9 1537-2048
2 0 3 12 5 65-96 22 10 2049-3072
3 0 4 13 5 97-128 23 10 3073-4096
4 1 5,6 14 6 129-192 24 11 4097-6144
5 1 7,8 15 6 193-256 25 11 6145-8192
6 2 9-12 16 7 257-384 26 12 8193-12288
7 2 13-16 17 7 385-512 27 12 12289-16384
8 3 17-24 18 8 513-768 28 13 16385-24576
9 3 25-32 19 8 769-1024 29 13 24577-32768
*/
const
DistLookupTable
=
[
/*
Extra Extra Extra
Code Bits Dist Code Bits Dist Code Bits Distance
---- ---- ---- ---- ---- ------ ---- ---- --------
0 0 1 10 4 33-48 20 9 1025-1536
1 0 2 11 4 49-64 21 9 1537-2048
2 0 3 12 5 65-96 22 10 2049-3072
3 0 4 13 5 97-128 23 10 3073-4096
4 1 5,6 14 6 129-192 24 11 4097-6144
5 1 7,8 15 6 193-256 25 11 6145-8192
6 2 9-12 16 7 257-384 26 12 8193-12288
7 2 13-16 17 7 385-512 27 12 12289-16384
8 3 17-24 18 8 513-768 28 13 16385-24576
9 3 25-32 19 8 769-1024 29 13 24577-32768
*/
var
DistLookupTable
=
[
[
0
,
1
],
[
0
,
2
],
[
0
,
3
],
[
0
,
4
],
[
1
,
5
],
[
1
,
7
],
[
2
,
9
],
[
2
,
13
],
...
...
@@ -351,65 +453,66 @@ const DistLookupTable = [
];
function inflateBlockData(bstream, hcLiteralTable, hcDistanceTable, buffer) {
    /*
        loop (until end of block code recognized)
            decode literal/length value from input stream
            if value < 256
                copy value (literal byte) to output stream
            otherwise
                if value = end of block (256)
                    break from loop
                otherwise (value = 257..285)
                    decode distance from input stream

                    move backwards distance bytes in the output
                    stream, and copy length bytes from this
                    position to the output stream.
    */
    var numSymbols = 0, blockSize = 0;
    for (;;) {
        var symbol = decodeSymbol(bstream, hcLiteralTable);
        ++numSymbols;
        if (symbol < 256) {
            // copy literal byte to output
            buffer.insertByte(symbol);
            blockSize++;
        } else {
            // end of block reached
            if (symbol == 256) {
                break;
            } else {
                var lengthLookup = LengthLookupTable[symbol - 257],
                    length = lengthLookup[1] + bstream.readBits(lengthLookup[0]),
                    distLookup = DistLookupTable[decodeSymbol(bstream, hcDistanceTable)],
                    distance = distLookup[1] + bstream.readBits(distLookup[0]);

                // now apply length and distance appropriately and copy to output
                // TODO: check that backward distance < data.length?

                // http://tools.ietf.org/html/rfc1951#page-11
                // "Note also that the referenced string may overlap the current
                //  position; for example, if the last 2 bytes decoded have values
                //  X and Y, a string reference with <length = 5, distance = 2>
                //  adds X,Y,X,Y,X to the output stream."
                //
                // loop for each character
                var ch = buffer.ptr - distance;
                blockSize += length;
                if (length > distance) {
                    var data = buffer.data;
                    while (length--) {
                        buffer.insertByte(data[ch++]);
                    }
                } else {
                    buffer.insertBytes(buffer.data.subarray(ch, ch + length))
                }
            } // length-distance pair
        } // length-distance pair or end-of-block
    } // loop until we reach end of block
    return blockSize;
}
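// The two lookup tables encode the RFC 1951 length and distance ranges as
// [extraBits, baseValue] pairs, which is exactly how the block decoder above
// consumes them. A short sketch of decoding one length/distance pair by hand,
// feeding the extra bits in as literals instead of reading a real bit stream:
var exampleLengthLookup = LengthLookupTable[269 - 257]; // [2, 19]: 2 extra bits, base length 19
var exampleLength = exampleLengthLookup[1] + 3;         // extra bits = 3 -> length 22
var exampleDistLookup = DistLookupTable[4];             // [1, 5]: 1 extra bit, base distance 5
var exampleDistance = exampleDistLookup[1] + 1;         // extra bit = 1 -> distance 6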
// {Uint8Array} compressedData A Uint8Array of the compressed file data.
@@ -417,105 +520,110 @@ function inflateBlockData(bstream, hcLiteralTable, hcDistanceTable, buffer) {
// deflate: http://tools.ietf.org/html/rfc1951
function inflate(compressedData, numDecompressedBytes) {
    // Bit stream representing the compressed data.
    var bstream = new bitjs.io.BitStream(compressedData.buffer,
        false /* rtl */,
        compressedData.byteOffset,
        compressedData.byteLength);
    var buffer = new bitjs.io.ByteBuffer(numDecompressedBytes);
    var numBlocks = 0, blockSize = 0;

    // block format: http://tools.ietf.org/html/rfc1951#page-9
    do {
        var bFinal = bstream.readBits(1),
            bType = bstream.readBits(2);
        blockSize = 0;
        ++numBlocks;
        // no compression
        if (bType == 0) {
            // skip remaining bits in this byte
            while (bstream.bitPtr != 0) bstream.readBits(1);
            var len = bstream.readBits(16),
                nlen = bstream.readBits(16);
            // TODO: check if nlen is the ones-complement of len?
            if (len > 0) buffer.insertBytes(bstream.readBytes(len));
            blockSize = len;
        }
        // fixed Huffman codes
        else if (bType == 1) {
            blockSize = inflateBlockData(bstream, getFixedLiteralTable(), getFixedDistanceTable(), buffer);
        }
        // dynamic Huffman codes
        else if (bType == 2) {
            var numLiteralLengthCodes = bstream.readBits(5) + 257;
            var numDistanceCodes = bstream.readBits(5) + 1,
                numCodeLengthCodes = bstream.readBits(4) + 4;

            // populate the array of code length codes (first de-compaction)
            var codeLengthsCodeLengths = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
            for (var i = 0; i < numCodeLengthCodes; ++i) {
                codeLengthsCodeLengths[CodeLengthCodeOrder[i]] = bstream.readBits(3);
            }

            // get the Huffman Codes for the code lengths
            var codeLengthsCodes = getHuffmanCodes(codeLengthsCodeLengths);

            // now follow this mapping
            /*
                0 - 15: Represent code lengths of 0 - 15
                    16: Copy the previous code length 3 - 6 times.
                        The next 2 bits indicate repeat length
                            (0 = 3, ... , 3 = 6)
                        Example:  Codes 8, 16 (+2 bits 11),
                                  16 (+2 bits 10) will expand to
                                  12 code lengths of 8 (1 + 6 + 5)
                    17: Repeat a code length of 0 for 3 - 10 times.
                        (3 bits of length)
                    18: Repeat a code length of 0 for 11 - 138 times
                        (7 bits of length)
            */
            // to generate the true code lengths of the Huffman Codes for the literal
            // and distance tables together
            var literalCodeLengths = [];
            var prevCodeLength = 0;
            while (literalCodeLengths.length < numLiteralLengthCodes + numDistanceCodes) {
                var symbol = decodeSymbol(bstream, codeLengthsCodes);
                if (symbol <= 15) {
                    literalCodeLengths.push(symbol);
                    prevCodeLength = symbol;
                } else if (symbol == 16) {
                    var repeat = bstream.readBits(2) + 3;
                    while (repeat--) {
                        literalCodeLengths.push(prevCodeLength);
                    }
                } else if (symbol == 17) {
                    var repeat = bstream.readBits(3) + 3;
                    while (repeat--) {
                        literalCodeLengths.push(0);
                    }
                } else if (symbol == 18) {
                    var repeat = bstream.readBits(7) + 11;
                    while (repeat--) {
                        literalCodeLengths.push(0);
                    }
                }
            }

            // now split the distance code lengths out of the literal code array
            var distanceCodeLengths = literalCodeLengths.splice(numLiteralLengthCodes, numDistanceCodes);

            // now generate the true Huffman Code tables using these code lengths
            var hcLiteralTable = getHuffmanCodes(literalCodeLengths),
                hcDistanceTable = getHuffmanCodes(distanceCodeLengths);
            blockSize = inflateBlockData(bstream, hcLiteralTable, hcDistanceTable, buffer);
        }
        // error
        else {
            err("Error! Encountered deflate block of type 3");
            return null;
        }

        // update progress
        currentBytesUnarchivedInFile += blockSize;
        currentBytesUnarchived += blockSize;
        postProgress();
    } while (bFinal != 1);
    // we are done reading blocks if the bFinal bit was set for this block
@@ -523,143 +631,7 @@ function inflate(compressedData, numDecompressedBytes) {
    return buffer.data;
}
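// A stored (type 0) block is the simplest way to exercise inflate() end to end:
// one header byte (BFINAL=1, BTYPE=00), LEN and NLEN as little-endian 16-bit
// values, then the literal bytes. The sketch below is illustrative and assumes it
// runs inside this worker, where the progress counters and postMessage exist:
var exampleStoredBlock = new Uint8Array([0x01, 0x03, 0x00, 0xFC, 0xFF, 0x61, 0x62, 0x63]);
var exampleInflated = inflate(exampleStoredBlock, 3); // Uint8Array [0x61, 0x62, 0x63], i.e. "abc"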
// event.data.file has the ArrayBuffer.
onmessage = function(event) {
    unzip(event.data.file, true);
};
cps/static/js/io/bitstream.js
@@ -12,277 +12,224 @@
var bitjs = bitjs || {};
bitjs.io = bitjs.io || {};

(function() {

    // mask for getting the Nth bit (zero-based)
    bitjs.BIT = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
        0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000];

    // mask for getting N number of bits (0-8)
    var BITMASK = [0, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F, 0xFF];

    /**
     * This bit stream peeks and consumes bits out of a binary stream.
     *
     * @param {ArrayBuffer} ab An ArrayBuffer object or a Uint8Array.
     * @param {boolean} rtl Whether the stream reads bits from the byte starting
     *     from bit 7 to 0 (true) or bit 0 to 7 (false).
     * @param {Number} opt_offset The offset into the ArrayBuffer
     * @param {Number} opt_length The length of this BitStream
     */
    bitjs.io.BitStream = function(ab, rtl, opt_offset, opt_length) {
        if (!ab || !ab.toString || ab.toString() !== "[object ArrayBuffer]") {
            throw "Error! BitArray constructed with an invalid ArrayBuffer object";
        }

        var offset = opt_offset || 0;
        var length = opt_length || ab.byteLength;
        this.bytes = new Uint8Array(ab, offset, length);
        this.bytePtr = 0; // tracks which byte we are on
        this.bitPtr = 0;  // tracks which bit we are on (can have values 0 through 7)
        this.peekBits = rtl ? this.peekBits_rtl : this.peekBits_ltr;
    };

    /**
     *   byte0      byte1      byte2      byte3
     * 7......0 | 7......0 | 7......0 | 7......0
     *
     * The bit pointer starts at bit0 of byte0 and moves left until it reaches
     * bit7 of byte0, then jumps to bit0 of byte1, etc.
     * @param {number} n The number of bits to peek.
     * @param {boolean=} movePointers Whether to move the pointer, defaults false.
     * @return {number} The peeked bits, as an unsigned number.
     */
    bitjs.io.BitStream.prototype.peekBits_ltr = function(n, movePointers) {
        if (n <= 0 || typeof n != typeof 1) {
            return 0;
        }

        var movePointers = movePointers || false,
            bytePtr = this.bytePtr,
            bitPtr = this.bitPtr,
            result = 0,
            bitsIn = 0,
            bytes = this.bytes;

        // keep going until we have no more bits left to peek at
        // TODO: Consider putting all bits from bytes we will need into a variable and then
        //       shifting/masking it to just extract the bits we want.
        //       This could be considerably faster when reading more than 3 or 4 bits at a time.
        while (n > 0) {
            if (bytePtr >= bytes.length) {
                throw "Error!  Overflowed the bit stream! n=" + n + ", bytePtr=" + bytePtr +
                    ", bytes.length=" + bytes.length + ", bitPtr=" + bitPtr;
                return -1;
            }

            var numBitsLeftInThisByte = (8 - bitPtr);
            if (n >= numBitsLeftInThisByte) {
                var mask = (BITMASK[numBitsLeftInThisByte] << bitPtr);
                result |= (((bytes[bytePtr] & mask) >> bitPtr) << bitsIn);

                bytePtr++;
                bitPtr = 0;
                bitsIn += numBitsLeftInThisByte;
                n -= numBitsLeftInThisByte;
            } else {
                var mask = (BITMASK[n] << bitPtr);
                result |= (((bytes[bytePtr] & mask) >> bitPtr) << bitsIn);

                bitPtr += n;
                bitsIn += n;
                n = 0;
            }
        }

        if (movePointers) {
            this.bitPtr = bitPtr;
            this.bytePtr = bytePtr;
        }

        return result;
    };

    /**
     *   byte0      byte1      byte2      byte3
     * 7......0 | 7......0 | 7......0 | 7......0
     *
     * The bit pointer starts at bit7 of byte0 and moves right until it reaches
     * bit0 of byte0, then goes to bit7 of byte1, etc.
     * @param {number} n The number of bits to peek.
     * @param {boolean=} movePointers Whether to move the pointer, defaults false.
     * @return {number} The peeked bits, as an unsigned number.
     */
    bitjs.io.BitStream.prototype.peekBits_rtl = function(n, movePointers) {
        if (n <= 0 || typeof n != typeof 1) {
            return 0;
        }

        var movePointers = movePointers || false,
            bytePtr = this.bytePtr,
            bitPtr = this.bitPtr,
            result = 0,
            bytes = this.bytes;

        // keep going until we have no more bits left to peek at
        // TODO: Consider putting all bits from bytes we will need into a variable and then
        //       shifting/masking it to just extract the bits we want.
        //       This could be considerably faster when reading more than 3 or 4 bits at a time.
        while (n > 0) {
            if (bytePtr >= bytes.length) {
                throw "Error!  Overflowed the bit stream! n=" + n + ", bytePtr=" + bytePtr +
                    ", bytes.length=" + bytes.length + ", bitPtr=" + bitPtr;
                return -1;
            }

            var numBitsLeftInThisByte = (8 - bitPtr);
            if (n >= numBitsLeftInThisByte) {
                result <<= numBitsLeftInThisByte;
                result |= (BITMASK[numBitsLeftInThisByte] & bytes[bytePtr]);
                bytePtr++;
                bitPtr = 0;
                n -= numBitsLeftInThisByte;
            } else {
                result <<= n;
                result |= ((bytes[bytePtr] & (BITMASK[n] << (8 - n - bitPtr))) >> (8 - n - bitPtr));

                bitPtr += n;
                n = 0;
            }
        }

        if (movePointers) {
            this.bitPtr = bitPtr;
            this.bytePtr = bytePtr;
        }

        return result;
    };

    /**
     * Peek at 16 bits from current position in the buffer.
     * Bit at (bytePtr,bitPtr) has the highest position in returning data.
     * Taken from getbits.hpp in unrar.
     * TODO: Move this out of BitStream and into unrar.
     */
    bitjs.io.BitStream.prototype.getBits = function() {
        return (((((this.bytes[this.bytePtr] & 0xff) << 16) +
            ((this.bytes[this.bytePtr + 1] & 0xff) << 8) +
            ((this.bytes[this.bytePtr + 2] & 0xff))) >>> (8 - this.bitPtr)) & 0xffff);
    };

    /**
     * Reads n bits out of the stream, consuming them (moving the bit pointer).
     * @param {number} n The number of bits to read.
     * @return {number} The read bits, as an unsigned number.
     */
    bitjs.io.BitStream.prototype.readBits = function(n) {
        return this.peekBits(n, true);
    };

    /**
     * This returns n bytes as a sub-array, advancing the pointer if movePointers
     * is true.  Only use this for uncompressed blocks as this throws away remaining
     * bits in the current byte.
     * @param {number} n The number of bytes to peek.
     * @param {boolean=} movePointers Whether to move the pointer, defaults false.
     * @return {Uint8Array} The subarray.
     */
    bitjs.io.BitStream.prototype.peekBytes = function(n, movePointers) {
        if (n <= 0 || typeof n != typeof 1) {
            return 0;
        }

        // from http://tools.ietf.org/html/rfc1951#page-11
        // "Any bits of input up to the next byte boundary are ignored."
        while (this.bitPtr != 0) {
            this.readBits(1);
        }

        var movePointers = movePointers || false;
        var bytePtr = this.bytePtr,
            bitPtr = this.bitPtr;

        var result = this.bytes.subarray(bytePtr, bytePtr + n);

        if (movePointers) {
            this.bytePtr += n;
        }

        return result;
    };

    /**
     * @param {number} n The number of bytes to read.
     * @return {Uint8Array} The subarray.
     */
    bitjs.io.BitStream.prototype.readBytes = function(n) {
        return this.peekBytes(n, true);
    };

})();
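// The only difference between the two peek directions is which end of the current
// byte bits are taken from, which is easiest to see on a single byte. A small
// sketch, assuming this file has been loaded so bitjs.io.BitStream exists:
var exampleBuffer = new ArrayBuffer(1);
new Uint8Array(exampleBuffer)[0] = 0xB2;                      // 0xB2 = bits 1 0 1 1 0 0 1 0 (bit7..bit0)
var ltrStream = new bitjs.io.BitStream(exampleBuffer, false); // reads bit0 first
var rtlStream = new bitjs.io.BitStream(exampleBuffer, true);  // reads bit7 first
// ltrStream.readBits(3) -> 2 (bits 0..2 are 010)
// rtlStream.readBits(3) -> 5 (bits 7..5 are 101)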
cps/static/js/io/bytebuffer.js
@@ -12,106 +12,111 @@
var bitjs = bitjs || {};
bitjs.io = bitjs.io || {};

(function() {

Removed (the ES6 class-based ByteBuffer):

/**
 * A write-only Byte buffer which uses a Uint8 Typed Array as a backing store.
 */
bitjs.io.ByteBuffer = class {
    /**
     * @param {number} numBytes The number of bytes to allocate.
     */
    constructor(numBytes) {
        if (typeof numBytes != typeof 1 || numBytes <= 0) {
            throw "Error! ByteBuffer initialized with '" + numBytes + "'";
        }
        this.data = new Uint8Array(numBytes);
        this.ptr = 0;
    }

    /**
     * @param {number} b The byte to insert.
     */
    insertByte(b) {
        // TODO: throw if byte is invalid?
        this.data[this.ptr++] = b;
    }

    /**
     * @param {Array.<number>|Uint8Array|Int8Array} bytes The bytes to insert.
     */
    insertBytes(bytes) {
        // TODO: throw if bytes is invalid?
        this.data.set(bytes, this.ptr);
        this.ptr += bytes.length;
    }

    /**
     * Writes an unsigned number into the next n bytes. If the number is too large
     * to fit into n bytes or is negative, an error is thrown.
     * @param {number} num The unsigned number to write.
     * @param {number} numBytes The number of bytes to write the number into.
     */
    writeNumber(num, numBytes) {
        if (numBytes < 1 || !numBytes) {
            throw 'Trying to write into too few bytes: ' + numBytes;
        }
        if (num < 0) {
            throw 'Trying to write a negative number (' + num + ') as an unsigned number to an ArrayBuffer';
        }
        if (num > (Math.pow(2, numBytes * 8) - 1)) {
            throw 'Trying to write ' + num + ' into only ' + numBytes + ' bytes';
        }

        // Roll 8-bits at a time into an array of bytes.
        const bytes = [];
        while (numBytes-- > 0) {
            const eightBits = num & 255;
            bytes.push(eightBits);
            num >>= 8;
        }

        this.insertBytes(bytes);
    }

    /**
     * Writes a signed number into the next n bytes. If the number is too large
     * to fit into n bytes, an error is thrown.
     * @param {number} num The signed number to write.
     * @param {number} numBytes The number of bytes to write the number into.
     */
    writeSignedNumber(num, numBytes) {
        if (numBytes < 1) {
            throw 'Trying to write into too few bytes: ' + numBytes;
        }

        const HALF = Math.pow(2, (numBytes * 8) - 1);
        if (num >= HALF || num < -HALF) {
            throw 'Trying to write ' + num + ' into only ' + numBytes + ' bytes';
        }

        // Roll 8-bits at a time into an array of bytes.
        const bytes = [];
        while (numBytes-- > 0) {
            const eightBits = num & 255;
            bytes.push(eightBits);
            num >>= 8;
        }

        this.insertBytes(bytes);
    }

    /**
     * @param {string} str The ASCII string to write.
     */
    writeASCIIString(str) {
        for (let i = 0; i < str.length; ++i) {
            const curByte = str.charCodeAt(i);
            if (curByte < 0 || curByte > 255) {
                throw 'Trying to write a non-ASCII string!';
            }
            this.insertByte(curByte);
        }
    }
};

Added (the ES5 prototype-based ByteBuffer):

/**
 * A write-only Byte buffer which uses a Uint8 Typed Array as a backing store.
 * @param {number} numBytes The number of bytes to allocate.
 * @constructor
 */
bitjs.io.ByteBuffer = function(numBytes) {
    if (typeof numBytes != typeof 1 || numBytes <= 0) {
        throw "Error! ByteBuffer initialized with '" + numBytes + "'";
    }
    this.data = new Uint8Array(numBytes);
    this.ptr = 0;
};

/**
 * @param {number} b The byte to insert.
 */
bitjs.io.ByteBuffer.prototype.insertByte = function(b) {
    // TODO: throw if byte is invalid?
    this.data[this.ptr++] = b;
};

/**
 * @param {Array.<number>|Uint8Array|Int8Array} bytes The bytes to insert.
 */
bitjs.io.ByteBuffer.prototype.insertBytes = function(bytes) {
    // TODO: throw if bytes is invalid?
    this.data.set(bytes, this.ptr);
    this.ptr += bytes.length;
};

/**
 * Writes an unsigned number into the next n bytes. If the number is too large
 * to fit into n bytes or is negative, an error is thrown.
 * @param {number} num The unsigned number to write.
 * @param {number} numBytes The number of bytes to write the number into.
 */
bitjs.io.ByteBuffer.prototype.writeNumber = function(num, numBytes) {
    if (numBytes < 1) {
        throw 'Trying to write into too few bytes: ' + numBytes;
    }
    if (num < 0) {
        throw 'Trying to write a negative number (' + num + ') as an unsigned number to an ArrayBuffer';
    }
    if (num > (Math.pow(2, numBytes * 8) - 1)) {
        throw 'Trying to write ' + num + ' into only ' + numBytes + ' bytes';
    }

    // Roll 8-bits at a time into an array of bytes.
    var bytes = [];
    while (numBytes-- > 0) {
        var eightBits = num & 255;
        bytes.push(eightBits);
        num >>= 8;
    }

    this.insertBytes(bytes);
};

/**
 * Writes a signed number into the next n bytes. If the number is too large
 * to fit into n bytes, an error is thrown.
 * @param {number} num The signed number to write.
 * @param {number} numBytes The number of bytes to write the number into.
 */
bitjs.io.ByteBuffer.prototype.writeSignedNumber = function(num, numBytes) {
    if (numBytes < 1) {
        throw 'Trying to write into too few bytes: ' + numBytes;
    }

    var HALF = Math.pow(2, (numBytes * 8) - 1);
    if (num >= HALF || num < -HALF) {
        throw 'Trying to write ' + num + ' into only ' + numBytes + ' bytes';
    }

    // Roll 8-bits at a time into an array of bytes.
    var bytes = [];
    while (numBytes-- > 0) {
        var eightBits = num & 255;
        bytes.push(eightBits);
        num >>= 8;
    }

    this.insertBytes(bytes);
};

/**
 * @param {string} str The ASCII string to write.
 */
bitjs.io.ByteBuffer.prototype.writeASCIIString = function(str) {
    for (var i = 0; i < str.length; ++i) {
        var curByte = str.charCodeAt(i);
        if (curByte < 0 || curByte > 255) {
            throw 'Trying to write a non-ASCII string!';
        }
        this.insertByte(curByte);
    }
};
})();
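A short usage sketch (mine, not from the repository) of the es5 ByteBuffer above; it shows that writeNumber() and writeSignedNumber() roll the value out 8 bits at a time, so the least-significant byte is written first.

// Usage sketch only: writing a tiny little-endian record with bitjs.io.ByteBuffer.
var buf = new bitjs.io.ByteBuffer(8);
buf.writeASCIIString("PK");      // bytes 0x50, 0x4B
buf.writeNumber(0x0403, 2);      // stored as 0x03, 0x04 (LSB first)
buf.writeSignedNumber(-2, 2);    // two's complement: 0xFE, 0xFF
buf.insertByte(0xFF);
// buf.data is now [0x50, 0x4B, 0x03, 0x04, 0xFE, 0xFF, 0xFF, 0x00] and buf.ptr === 7.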
cps/static/js/io/bytestream.js
...
...
@@ -12,297 +12,153 @@
var bitjs = bitjs || {};
bitjs.io = bitjs.io || {};

(function() {

Removed (the ES6 class-based ByteStream):

/**
 * This object allows you to peek and consume bytes as numbers and strings out
 * of a stream. More bytes can be pushed into the back of the stream via the
 * push() method.
 */
bitjs.io.ByteStream = class {
    /**
     * @param {ArrayBuffer} ab The ArrayBuffer object.
     * @param {number=} opt_offset The offset into the ArrayBuffer
     * @param {number=} opt_length The length of this BitStream
     */
    constructor(ab, opt_offset, opt_length) {
        if (!(ab instanceof ArrayBuffer)) {
            throw 'Error! BitArray constructed with an invalid ArrayBuffer object';
        }

        const offset = opt_offset || 0;
        const length = opt_length || ab.byteLength;

        /**
         * The current page of bytes in the stream.
         * @type {Uint8Array}
         * @private
         */
        this.bytes = new Uint8Array(ab, offset, length);

        /**
         * The next pages of bytes in the stream.
         * @type {Array<Uint8Array>}
         * @private
         */
        this.pages_ = [];

        /**
         * The byte in the current page that we will read next.
         * @type {Number}
         * @private
         */
        this.ptr = 0;

        /**
         * An ever-increasing number.
         * @type {Number}
         * @private
         */
        this.bytesRead_ = 0;
    }

    /**
     * Returns how many bytes have been read in the stream since the beginning of time.
     */
    getNumBytesRead() {
        return this.bytesRead_;
    }

    /**
     * Returns how many bytes are currently in the stream left to be read.
     */
    getNumBytesLeft() {
        const bytesInCurrentPage = (this.bytes.byteLength - this.ptr);
        return this.pages_.reduce((acc, arr) => acc + arr.length, bytesInCurrentPage);
    }

    /**
     * Move the pointer ahead n bytes. If the pointer is at the end of the current array
     * of bytes and we have another page of bytes, point at the new page. This is a private
     * method, no validation is done.
     * @param {number} n Number of bytes to increment.
     * @private
     */
    movePointer_(n) {
        this.ptr += n;
        this.bytesRead_ += n;
        while (this.ptr >= this.bytes.length && this.pages_.length > 0) {
            this.ptr -= this.bytes.length;
            this.bytes = this.pages_.shift();
        }
    }

    /**
     * Peeks at the next n bytes as an unsigned number but does not advance the
     * pointer.
     * @param {number} n The number of bytes to peek at. Must be a positive integer.
     * @return {number} The n bytes interpreted as an unsigned number.
     */
    peekNumber(n) {
        const num = parseInt(n, 10);
        if (n !== num || num < 0) {
            throw 'Error! Called peekNumber() with a non-positive integer';
        } else if (num === 0) {
            return 0;
        }

        if (n > 4) {
            throw 'Error! Called peekNumber(' + n +
                ') but this method can only reliably read numbers up to 4 bytes long';
        }

        if (this.getNumBytesLeft() < num) {
            throw 'Error! Overflowed the byte stream while peekNumber()! n=' + num +
                ', ptr=' + this.ptr + ', bytes.length=' + this.getNumBytesLeft();
        }

        let result = 0;
        let curPage = this.bytes;
        let pageIndex = 0;
        let ptr = this.ptr;
        for (let i = 0; i < num; ++i) {
            result |= (curPage[ptr++] << (i * 8));
            if (ptr >= curPage.length) {
                curPage = this.pages_[pageIndex++];
                ptr = 0;
            }
        }
        return result;
    }

    /**
     * Returns the next n bytes as an unsigned number (or -1 on error)
     * and advances the stream pointer n bytes.
     * @param {number} n The number of bytes to read. Must be a positive integer.
     * @return {number} The n bytes interpreted as an unsigned number.
     */
    readNumber(n) {
        const num = this.peekNumber(n);
        this.movePointer_(n);
        return num;
    }

    /**
     * Returns the next n bytes as a signed number but does not advance the
     * pointer.
     * @param {number} n The number of bytes to read. Must be a positive integer.
     * @return {number} The bytes interpreted as a signed number.
     */
    peekSignedNumber(n) {
        let num = this.peekNumber(n);
        const HALF = Math.pow(2, (n * 8) - 1);
        const FULL = HALF * 2;
        if (num >= HALF) num -= FULL;
        return num;
    }

    /**
     * Returns the next n bytes as a signed number and advances the stream pointer.
     * @param {number} n The number of bytes to read. Must be a positive integer.
     * @return {number} The bytes interpreted as a signed number.
     */
    readSignedNumber(n) {
        const num = this.peekSignedNumber(n);
        this.movePointer_(n);
        return num;
    }

    /**
     * This returns n bytes as a sub-array, advancing the pointer if movePointers
     * is true.
     * @param {number} n The number of bytes to read. Must be a positive integer.
     * @param {boolean} movePointers Whether to move the pointers.
     * @return {Uint8Array} The subarray.
     */
    peekBytes(n, movePointers) {
        const num = parseInt(n, 10);
        if (n !== num || num < 0) {
            throw 'Error! Called peekBytes() with a non-positive integer';
        } else if (num === 0) {
            return new Uint8Array();
        }

        const totalBytesLeft = this.getNumBytesLeft();
        if (num > totalBytesLeft) {
            throw 'Error! Overflowed the byte stream during peekBytes! n=' + num +
                ', ptr=' + this.ptr + ', bytes.length=' + this.getNumBytesLeft();
        }

        const result = new Uint8Array(num);
        let curPage = this.bytes;
        let ptr = this.ptr;
        let bytesLeftToCopy = num;
        let pageIndex = 0;
        while (bytesLeftToCopy > 0) {
            const bytesLeftInPage = curPage.length - ptr;
            const sourceLength = Math.min(bytesLeftToCopy, bytesLeftInPage);

            result.set(curPage.subarray(ptr, ptr + sourceLength), num - bytesLeftToCopy);

            ptr += sourceLength;
            if (ptr >= curPage.length) {
                curPage = this.pages_[pageIndex++];
                ptr = 0;
            }

            bytesLeftToCopy -= sourceLength;
        }

        if (movePointers) {
            this.movePointer_(num);
        }

        return result;
    }

    /**
     * Reads the next n bytes as a sub-array.
     * @param {number} n The number of bytes to read. Must be a positive integer.
     * @return {Uint8Array} The subarray.
     */
    readBytes(n) {
        return this.peekBytes(n, true);
    }

    /**
     * Peeks at the next n bytes as an ASCII string but does not advance the pointer.
     * @param {number} n The number of bytes to peek at. Must be a positive integer.
     * @return {string} The next n bytes as a string.
     */
    peekString(n) {
        const num = parseInt(n, 10);
        if (n !== num || num < 0) {
            throw 'Error! Called peekString() with a non-positive integer';
        } else if (num === 0) {
            return '';
        }

        const totalBytesLeft = this.getNumBytesLeft();
        if (num > totalBytesLeft) {
            throw 'Error! Overflowed the byte stream while peekString()! n=' + num +
                ', ptr=' + this.ptr + ', bytes.length=' + this.getNumBytesLeft();
        }

        let result = new Array(num);
        let curPage = this.bytes;
        let pageIndex = 0;
        let ptr = this.ptr;
        for (let i = 0; i < num; ++i) {
            result[i] = String.fromCharCode(curPage[ptr++]);
            if (ptr >= curPage.length) {
                curPage = this.pages_[pageIndex++];
                ptr = 0;
            }
        }
        return result.join('');
    }

    /**
     * Returns the next n bytes as an ASCII string and advances the stream pointer
     * n bytes.
     * @param {number} n The number of bytes to read. Must be a positive integer.
     * @return {string} The next n bytes as a string.
     */
    readString(n) {
        const strToReturn = this.peekString(n);
        this.movePointer_(n);
        return strToReturn;
    }

    /**
     * Feeds more bytes into the back of the stream.
     * @param {ArrayBuffer} ab
     */
    push(ab) {
        if (!(ab instanceof ArrayBuffer)) {
            throw 'Error! ByteStream.push() called with an invalid ArrayBuffer object';
        }

        this.pages_.push(new Uint8Array(ab));
        // If the pointer is at the end of the current page of bytes, this will advance
        // to the next page.
        this.movePointer_(0);
    }

    /**
     * Creates a new ByteStream from this ByteStream that can be read / peeked.
     * @return {bitjs.io.ByteStream} A clone of this ByteStream.
     */
    tee() {
        const clone = new bitjs.io.ByteStream(this.bytes.buffer);
        clone.bytes = this.bytes;
        clone.ptr = this.ptr;
        clone.pages_ = this.pages_.slice();
        clone.bytesRead_ = this.bytesRead_;
        return clone;
    }
};

Added (the ES5 prototype-based ByteStream):

/**
 * This object allows you to peek and consume bytes as numbers and strings
 * out of an ArrayBuffer. In this buffer, everything must be byte-aligned.
 *
 * @param {ArrayBuffer} ab The ArrayBuffer object.
 * @param {number=} opt_offset The offset into the ArrayBuffer
 * @param {number=} opt_length The length of this BitStream
 * @constructor
 */
bitjs.io.ByteStream = function(ab, opt_offset, opt_length) {
    var offset = opt_offset || 0;
    var length = opt_length || ab.byteLength;
    this.bytes = new Uint8Array(ab, offset, length);
    this.ptr = 0;
};

/**
 * Peeks at the next n bytes as an unsigned number but does not advance the
 * pointer
 * TODO: This apparently cannot read more than 4 bytes as a number?
 * @param {number} n The number of bytes to peek at.
 * @return {number} The n bytes interpreted as an unsigned number.
 */
bitjs.io.ByteStream.prototype.peekNumber = function(n) {
    // TODO: return error if n would go past the end of the stream?
    if (n <= 0 || typeof n != typeof 1) return -1;

    var result = 0;
    // read from last byte to first byte and roll them in
    var curByte = this.ptr + n - 1;
    while (curByte >= this.ptr) {
        result <<= 8;
        result |= this.bytes[curByte];
        --curByte;
    }
    return result;
};

/**
 * Returns the next n bytes as an unsigned number (or -1 on error)
 * and advances the stream pointer n bytes.
 * @param {number} n The number of bytes to read.
 * @return {number} The n bytes interpreted as an unsigned number.
 */
bitjs.io.ByteStream.prototype.readNumber = function(n) {
    var num = this.peekNumber(n);
    this.ptr += n;
    return num;
};

/**
 * Returns the next n bytes as a signed number but does not advance the
 * pointer.
 * @param {number} n The number of bytes to read.
 * @return {number} The bytes interpreted as a signed number.
 */
bitjs.io.ByteStream.prototype.peekSignedNumber = function(n) {
    var num = this.peekNumber(n);
    var HALF = Math.pow(2, (n * 8) - 1);
    var FULL = HALF * 2;
    if (num >= HALF) num -= FULL;
    return num;
};

/**
 * Returns the next n bytes as a signed number and advances the stream pointer.
 * @param {number} n The number of bytes to read.
 * @return {number} The bytes interpreted as a signed number.
 */
bitjs.io.ByteStream.prototype.readSignedNumber = function(n) {
    var num = this.peekSignedNumber(n);
    this.ptr += n;
    return num;
};

/**
 * This returns n bytes as a sub-array, advancing the pointer if movePointers
 * is true.
 * @param {number} n The number of bytes to read.
 * @param {boolean} movePointers Whether to move the pointers.
 * @return {Uint8Array} The subarray.
 */
bitjs.io.ByteStream.prototype.peekBytes = function(n, movePointers) {
    if (n <= 0 || typeof n != typeof 1) {
        return null;
    }

    var result = this.bytes.subarray(this.ptr, this.ptr + n);

    if (movePointers) {
        this.ptr += n;
    }

    return result;
};

/**
 * Reads the next n bytes as a sub-array.
 * @param {number} n The number of bytes to read.
 * @return {Uint8Array} The subarray.
 */
bitjs.io.ByteStream.prototype.readBytes = function(n) {
    return this.peekBytes(n, true);
};

/**
 * Peeks at the next n bytes as a string but does not advance the pointer.
 * @param {number} n The number of bytes to peek at.
 * @return {string} The next n bytes as a string.
 */
bitjs.io.ByteStream.prototype.peekString = function(n) {
    if (n <= 0 || typeof n != typeof 1) {
        return "";
    }

    var result = "";
    for (var p = this.ptr, end = this.ptr + n; p < end; ++p) {
        result += String.fromCharCode(this.bytes[p]);
    }
    return result;
};

/**
 * Returns the next n bytes as an ASCII string and advances the stream pointer
 * n bytes.
 * @param {number} n The number of bytes to read.
 * @return {string} The next n bytes as a string.
 */
bitjs.io.ByteStream.prototype.readString = function(n) {
    var strToReturn = this.peekString(n);
    this.ptr += n;
    return strToReturn;
};
})();
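For symmetry, a usage sketch (again not from the repository) of the es5 ByteStream above, reading back the kind of little-endian record shown in the ByteBuffer sketch. Note that the removed ES6 class could also be fed more data with push(arrayBuffer) and cloned with tee(); the es5 version reads only the single ArrayBuffer it was constructed with.

// Usage sketch only: consuming a small little-endian record with bitjs.io.ByteStream.
var ab = new Uint8Array([0x50, 0x4B, 0x03, 0x04, 0xFE, 0xFF]).buffer;
var stream = new bitjs.io.ByteStream(ab);
stream.readString(2);        // "PK"
stream.peekNumber(2);        // 0x0403 (bytes 0x03, 0x04, least-significant first), pointer unchanged
stream.readNumber(2);        // 0x0403, and advances the pointer by 2
stream.readSignedNumber(2);  // -2 (0xFFFE interpreted as two's complement)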
cps/static/js/kthoom.js
...
...
@@ -160,7 +160,7 @@ function initProgressClick() {
function loadFromArrayBuffer(ab) {
    var start = (new Date).getTime();
    var h = new Uint8Array(ab, 0, 10);
-   var pathToBitJS = "../../static/js/";
+   var pathToBitJS = "../../static/js/archive/";
    if (h[0] === 0x52 && h[1] === 0x61 && h[2] === 0x72 && h[3] === 0x21) { //Rar!
        unarchiver = new bitjs.archive.Unrarrer(ab, pathToBitJS);
    } else if (h[0] === 80 && h[1] === 75) { //PK (Zip)
...
...
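The hunk above shows kthoom.js picking an unarchiver from the file's magic bytes. A minimal sketch of that check follows; the helper name is hypothetical (not in kthoom.js), it assumes ab holds at least 4 bytes, and the real function continues with further format checks that are truncated in this hunk.

// Hypothetical helper mirroring the header checks in loadFromArrayBuffer().
function sniffArchiveType(ab) {
    var h = new Uint8Array(ab, 0, 4);
    if (h[0] === 0x52 && h[1] === 0x61 && h[2] === 0x72 && h[3] === 0x21) {
        return "rar";       // "Rar!"
    }
    if (h[0] === 80 && h[1] === 75) {
        return "zip";       // "PK", i.e. 0x50 0x4B
    }
    return "unknown";       // the original function goes on to other checks here
}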