captainwong / douban-api-proxy · Commits

Commit c0d136cc
Authored May 09, 2019 by subdiox
Fix slow loading

Parent: 479b4b7d
Showing 12 changed files with 3920 additions and 1908 deletions (+3920 -1908)
cps/static/js/archive.js            +0     -357
cps/static/js/archive/archive.js    +362   -0
cps/static/js/archive/rarvm.js      +1009  -0
cps/static/js/archive/unrar.js      +1459  -0
cps/static/js/archive/untar.js      +19    -22
cps/static/js/archive/unzip.js      +665   -0
cps/static/js/io/bitstream.js       +288   -0
cps/static/js/io/bytebuffer.js      +117   -0
cps/static/js/io/bytestream.js      +0     -0
cps/static/js/unrar.js              +0     -902
cps/static/js/unzip.js              +0     -626
cps/templates/readcbr.html          +1     -1
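For orientation, the relocated archive API under cps/static/js/archive/ is consumed roughly as in the sketch below. This is assumed caller code, not part of this commit; the arrayBuffer source and the 'cps/static/js/' script path are placeholders.

// Minimal usage sketch (assumed caller code): sniff the archive type from its
// byte signature and unpack it in a Web Worker.
var unarchiver = bitjs.archive.GetUnarchiver(arrayBuffer, 'cps/static/js/'); // path is an assumption
unarchiver.addEventListener(bitjs.archive.UnarchiveEvent.Type.EXTRACT, function(e) {
    // e.unarchivedFile implements the UnarchivedFile interface: {filename, fileData}.
    console.log('extracted ' + e.unarchivedFile.filename);
});
unarchiver.addEventListener(bitjs.archive.UnarchiveEvent.Type.FINISH, function(e) {
    console.log('unarchiving finished');
});
unarchiver.start();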
cps/static/js/archive.js  (deleted, 100644 → 0, view file @ 479b4b7d)
/**
 * archive.js
 *
 * Provides base functionality for unarchiving.
 *
 * Licensed under the MIT License
 *
 * Copyright(c) 2011 Google Inc.
 */
/* global bitjs */

var bitjs = bitjs || {};
bitjs.archive = bitjs.archive || {};

(function() {

    // ===========================================================================
    // Stolen from Closure because it's the best way to do Java-like inheritance.
    bitjs.base = function(me, optMethodName, varArgs) {
        var caller = arguments.callee.caller;
        if (caller.superClass_) {
            // This is a constructor. Call the superclass constructor.
            return caller.superClass_.constructor.apply(me, Array.prototype.slice.call(arguments, 1));
        }

        var args = Array.prototype.slice.call(arguments, 2);
        var foundCaller = false;
        for (var ctor = me.constructor; ctor; ctor = ctor.superClass_ && ctor.superClass_.constructor) {
            if (ctor.prototype[optMethodName] === caller) {
                foundCaller = true;
            } else if (foundCaller) {
                return ctor.prototype[optMethodName].apply(me, args);
            }
        }

        // If we did not find the caller in the prototype chain,
        // then one of two things happened:
        // 1) The caller is an instance method.
        // 2) This method was not called by the right caller.
        if (me[optMethodName] === caller) {
            return me.constructor.prototype[optMethodName].apply(me, args);
        } else {
            throw Error("goog.base called from a method of one name " +
                "to a method of a different name");
        }
    };

    bitjs.inherits = function(childCtor, parentCtor) {
        /** @constructor */
        function TempCtor() {}
        TempCtor.prototype = parentCtor.prototype;
        childCtor.superClass_ = parentCtor.prototype;
        childCtor.prototype = new TempCtor();
        childCtor.prototype.constructor = childCtor;
    };
    // ===========================================================================

    /**
     * An unarchive event.
     *
     * @param {string} type The event type.
     * @constructor
     */
    bitjs.archive.UnarchiveEvent = function(type) {
        /**
         * The event type.
         *
         * @type {string}
         */
        this.type = type;
    };

    /**
     * The UnarchiveEvent types.
     */
    bitjs.archive.UnarchiveEvent.Type = {
        START: "start",
        PROGRESS: "progress",
        EXTRACT: "extract",
        FINISH: "finish",
        INFO: "info",
        ERROR: "error"
    };

    /**
     * Useful for passing info up to the client (for debugging).
     *
     * @param {string} msg The info message.
     */
    bitjs.archive.UnarchiveInfoEvent = function(msg) {
        bitjs.base(this, bitjs.archive.UnarchiveEvent.Type.INFO);

        /**
         * The information message.
         *
         * @type {string}
         */
        this.msg = msg;
    };
    bitjs.inherits(bitjs.archive.UnarchiveInfoEvent, bitjs.archive.UnarchiveEvent);

    /**
     * An unrecoverable error has occured.
     *
     * @param {string} msg The error message.
     */
    bitjs.archive.UnarchiveErrorEvent = function(msg) {
        bitjs.base(this, bitjs.archive.UnarchiveEvent.Type.ERROR);

        /**
         * The information message.
         *
         * @type {string}
         */
        this.msg = msg;
    };
    bitjs.inherits(bitjs.archive.UnarchiveErrorEvent, bitjs.archive.UnarchiveEvent);

    /**
     * Start event.
     *
     * @param {string} msg The info message.
     */
    bitjs.archive.UnarchiveStartEvent = function() {
        bitjs.base(this, bitjs.archive.UnarchiveEvent.Type.START);
    };
    bitjs.inherits(bitjs.archive.UnarchiveStartEvent, bitjs.archive.UnarchiveEvent);

    /**
     * Finish event.
     *
     * @param {string} msg The info message.
     */
    bitjs.archive.UnarchiveFinishEvent = function() {
        bitjs.base(this, bitjs.archive.UnarchiveEvent.Type.FINISH);
    };
    bitjs.inherits(bitjs.archive.UnarchiveFinishEvent, bitjs.archive.UnarchiveEvent);

    /**
     * Progress event.
     */
    bitjs.archive.UnarchiveProgressEvent = function(currentFilename, currentFileNumber,
        currentBytesUnarchivedInFile, currentBytesUnarchived, totalUncompressedBytesInArchive,
        totalFilesInArchive) {
        bitjs.base(this, bitjs.archive.UnarchiveEvent.Type.PROGRESS);

        this.currentFilename = currentFilename;
        this.currentFileNumber = currentFileNumber;
        this.currentBytesUnarchivedInFile = currentBytesUnarchivedInFile;
        this.totalFilesInArchive = totalFilesInArchive;
        this.currentBytesUnarchived = currentBytesUnarchived;
        this.totalUncompressedBytesInArchive = totalUncompressedBytesInArchive;
    };
    bitjs.inherits(bitjs.archive.UnarchiveProgressEvent, bitjs.archive.UnarchiveEvent);

    /**
     * All extracted files returned by an Unarchiver will implement
     * the following interface:
     *
     * interface UnarchivedFile {
     *   string filename
     *   TypedArray fileData
     * }
     *
     */

    /**
     * Extract event.
     */
    bitjs.archive.UnarchiveExtractEvent = function(unarchivedFile) {
        bitjs.base(this, bitjs.archive.UnarchiveEvent.Type.EXTRACT);

        /**
         * @type {UnarchivedFile}
         */
        this.unarchivedFile = unarchivedFile;
    };
    bitjs.inherits(bitjs.archive.UnarchiveExtractEvent, bitjs.archive.UnarchiveEvent);

    /**
     * Base class for all Unarchivers.
     *
     * @param {ArrayBuffer} arrayBuffer The Array Buffer.
     * @param {string} optPathToBitJS Optional string for where the BitJS files are located.
     * @constructor
     */
    bitjs.archive.Unarchiver = function(arrayBuffer, optPathToBitJS) {
        /**
         * The ArrayBuffer object.
         * @type {ArrayBuffer}
         * @protected
         */
        this.ab = arrayBuffer;

        /**
         * The path to the BitJS files.
         * @type {string}
         * @private
         */
        this.pathToBitJS_ = optPathToBitJS || "";

        /**
         * A map from event type to an array of listeners.
         * @type {Map.<string, Array>}
         */
        this.listeners_ = {};
        for (var type in bitjs.archive.UnarchiveEvent.Type) {
            this.listeners_[bitjs.archive.UnarchiveEvent.Type[type]] = [];
        }
    };

    /**
     * Private web worker initialized during start().
     * @type {Worker}
     * @private
     */
    bitjs.archive.Unarchiver.prototype.worker_ = null;

    /**
     * This method must be overridden by the subclass to return the script filename.
     * @return {string} The script filename.
     * @protected.
     */
    bitjs.archive.Unarchiver.prototype.getScriptFileName = function() {
        throw "Subclasses of AbstractUnarchiver must overload getScriptFileName()";
    };

    /**
     * Adds an event listener for UnarchiveEvents.
     *
     * @param {string} Event type.
     * @param {function} An event handler function.
     */
    bitjs.archive.Unarchiver.prototype.addEventListener = function(type, listener) {
        if (type in this.listeners_) {
            if (this.listeners_[type].indexOf(listener) === -1) {
                this.listeners_[type].push(listener);
            }
        }
    };

    /**
     * Removes an event listener.
     *
     * @param {string} Event type.
     * @param {EventListener|function} An event listener or handler function.
     */
    bitjs.archive.Unarchiver.prototype.removeEventListener = function(type, listener) {
        if (type in this.listeners_) {
            var index = this.listeners_[type].indexOf(listener);
            if (index !== -1) {
                this.listeners_[type].splice(index, 1);
            }
        }
    };

    /**
     * Receive an event and pass it to the listener functions.
     *
     * @param {bitjs.archive.UnarchiveEvent} e
     * @private
     */
    bitjs.archive.Unarchiver.prototype.handleWorkerEvent_ = function(e) {
        if ((e instanceof bitjs.archive.UnarchiveEvent || e.type) &&
            this.listeners_[e.type] instanceof Array) {
            this.listeners_[e.type].forEach(function(listener) {
                listener(e);
            });
            if (e.type === bitjs.archive.UnarchiveEvent.Type.FINISH) {
                this.worker_.terminate();
            }
        }
    };

    /**
     * Starts the unarchive in a separate Web Worker thread and returns immediately.
     */
    bitjs.archive.Unarchiver.prototype.start = function() {
        var me = this;
        var scriptFileName = this.pathToBitJS_ + this.getScriptFileName();
        if (scriptFileName) {
            this.worker_ = new Worker(scriptFileName);

            this.worker_.onerror = function(e) {
                throw e;
            };

            this.worker_.onmessage = function(e) {
                if (typeof e.data !== "string") {
                    // Assume that it is an UnarchiveEvent. Some browsers preserve the 'type'
                    // so that instanceof UnarchiveEvent returns true, but others do not.
                    me.handleWorkerEvent_(e.data);
                }
            };

            this.worker_.postMessage({file: this.ab});
        }
    };

    /**
     * Terminates the Web Worker for this Unarchiver and returns immediately.
     */
    bitjs.archive.Unarchiver.prototype.stop = function() {
        if (this.worker_) {
            this.worker_.terminate();
        }
    };

    /**
     * Unzipper
     * @extends {bitjs.archive.Unarchiver}
     * @constructor
     */
    bitjs.archive.Unzipper = function(arrayBuffer, optPathToBitJS) {
        bitjs.base(this, arrayBuffer, optPathToBitJS);
    };
    bitjs.inherits(bitjs.archive.Unzipper, bitjs.archive.Unarchiver);
    bitjs.archive.Unzipper.prototype.getScriptFileName = function() {
        return "unzip.js";
    };

    /**
     * Unrarrer
     * @extends {bitjs.archive.Unarchiver}
     * @constructor
     */
    bitjs.archive.Unrarrer = function(arrayBuffer, optPathToBitJS) {
        bitjs.base(this, arrayBuffer, optPathToBitJS);
    };
    bitjs.inherits(bitjs.archive.Unrarrer, bitjs.archive.Unarchiver);
    bitjs.archive.Unrarrer.prototype.getScriptFileName = function() {
        return "unrar.js";
    };

    /**
     * Untarrer
     * @extends {bitjs.archive.Unarchiver}
     * @constructor
     */
    bitjs.archive.Untarrer = function(arrayBuffer, optPathToBitJS) {
        bitjs.base(this, arrayBuffer, optPathToBitJS);
    };
    bitjs.inherits(bitjs.archive.Untarrer, bitjs.archive.Unarchiver);
    bitjs.archive.Untarrer.prototype.getScriptFileName = function() {
        return "untar.js";
    };
})();
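The deleted file wires its subclasses together with the Closure-style helpers above. A minimal sketch of that pattern follows; MyUnarchiver and my_unarchiver.js are hypothetical names, not part of the diff.

// Hypothetical subclass following the same pattern as Unzipper/Unrarrer/Untarrer above.
bitjs.archive.MyUnarchiver = function(arrayBuffer, optPathToBitJS) {
    bitjs.base(this, arrayBuffer, optPathToBitJS); // chains to Unarchiver's constructor
};
bitjs.inherits(bitjs.archive.MyUnarchiver, bitjs.archive.Unarchiver);
bitjs.archive.MyUnarchiver.prototype.getScriptFileName = function() {
    return "my_unarchiver.js"; // worker script spawned by start()
};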
cps/static/js/archive/archive.js  (new file, 0 → 100644, view file @ c0d136cc)
/**
 * archive.js
 *
 * Provides base functionality for unarchiving.
 *
 * Licensed under the MIT License
 *
 * Copyright(c) 2011 Google Inc.
 */

var bitjs = bitjs || {};
bitjs.archive = bitjs.archive || {};

/**
 * An unarchive event.
 */
bitjs.archive.UnarchiveEvent = class {
  /**
   * @param {string} type The event type.
   */
  constructor(type) {
    /**
     * The event type.
     * @type {string}
     */
    this.type = type;
  }
}

/**
 * The UnarchiveEvent types.
 */
bitjs.archive.UnarchiveEvent.Type = {
  START: 'start',
  PROGRESS: 'progress',
  EXTRACT: 'extract',
  FINISH: 'finish',
  INFO: 'info',
  ERROR: 'error'
};

/**
 * Useful for passing info up to the client (for debugging).
 */
bitjs.archive.UnarchiveInfoEvent = class extends bitjs.archive.UnarchiveEvent {
  /**
   * @param {string} msg The info message.
   */
  constructor(msg) {
    super(bitjs.archive.UnarchiveEvent.Type.INFO);

    /**
     * The information message.
     * @type {string}
     */
    this.msg = msg;
  }
}

/**
 * An unrecoverable error has occured.
 */
bitjs.archive.UnarchiveErrorEvent = class extends bitjs.archive.UnarchiveEvent {
  /**
   * @param {string} msg The error message.
   */
  constructor(msg) {
    super(bitjs.archive.UnarchiveEvent.Type.ERROR);

    /**
     * The information message.
     * @type {string}
     */
    this.msg = msg;
  }
}

/**
 * Start event.
 */
bitjs.archive.UnarchiveStartEvent = class extends bitjs.archive.UnarchiveEvent {
  constructor() {
    super(bitjs.archive.UnarchiveEvent.Type.START);
  }
}

/**
 * Finish event.
 */
bitjs.archive.UnarchiveFinishEvent = class extends bitjs.archive.UnarchiveEvent {
  constructor() {
    super(bitjs.archive.UnarchiveEvent.Type.FINISH);
  }
}

/**
 * Progress event.
 */
bitjs.archive.UnarchiveProgressEvent = class extends bitjs.archive.UnarchiveEvent {
  /**
   * @param {string} currentFilename
   * @param {number} currentFileNumber
   * @param {number} currentBytesUnarchivedInFile
   * @param {number} currentBytesUnarchived
   * @param {number} totalUncompressedBytesInArchive
   * @param {number} totalFilesInArchive
   * @param {number} totalCompressedBytesRead
   */
  constructor(currentFilename, currentFileNumber, currentBytesUnarchivedInFile,
      currentBytesUnarchived, totalUncompressedBytesInArchive, totalFilesInArchive,
      totalCompressedBytesRead) {
    super(bitjs.archive.UnarchiveEvent.Type.PROGRESS);

    this.currentFilename = currentFilename;
    this.currentFileNumber = currentFileNumber;
    this.currentBytesUnarchivedInFile = currentBytesUnarchivedInFile;
    this.totalFilesInArchive = totalFilesInArchive;
    this.currentBytesUnarchived = currentBytesUnarchived;
    this.totalUncompressedBytesInArchive = totalUncompressedBytesInArchive;
    this.totalCompressedBytesRead = totalCompressedBytesRead;
  }
}

/**
 * Extract event.
 */
bitjs.archive.UnarchiveExtractEvent = class extends bitjs.archive.UnarchiveEvent {
  /**
   * @param {UnarchivedFile} unarchivedFile
   */
  constructor(unarchivedFile) {
    super(bitjs.archive.UnarchiveEvent.Type.EXTRACT);

    /**
     * @type {UnarchivedFile}
     */
    this.unarchivedFile = unarchivedFile;
  }
}

/**
 * All extracted files returned by an Unarchiver will implement
 * the following interface:
 *
 * interface UnarchivedFile {
 *   string filename
 *   TypedArray fileData
 * }
 *
 */

/**
 * Base class for all Unarchivers.
 */
bitjs.archive.Unarchiver = class {
  /**
   * @param {ArrayBuffer} arrayBuffer The Array Buffer.
   * @param {string} opt_pathToBitJS Optional string for where the BitJS files are located.
   */
  constructor(arrayBuffer, opt_pathToBitJS) {
    /**
     * The ArrayBuffer object.
     * @type {ArrayBuffer}
     * @protected
     */
    this.ab = arrayBuffer;

    /**
     * The path to the BitJS files.
     * @type {string}
     * @private
     */
    this.pathToBitJS_ = opt_pathToBitJS || '/';

    /**
     * A map from event type to an array of listeners.
     * @type {Map.<string, Array>}
     */
    this.listeners_ = {};
    for (let type in bitjs.archive.UnarchiveEvent.Type) {
      this.listeners_[bitjs.archive.UnarchiveEvent.Type[type]] = [];
    }

    /**
     * Private web worker initialized during start().
     * @type {Worker}
     * @private
     */
    this.worker_ = null;
  }

  /**
   * This method must be overridden by the subclass to return the script filename.
   * @return {string} The script filename.
   * @protected.
   */
  getScriptFileName() {
    throw 'Subclasses of AbstractUnarchiver must overload getScriptFileName()';
  }

  /**
   * Adds an event listener for UnarchiveEvents.
   *
   * @param {string} Event type.
   * @param {function} An event handler function.
   */
  addEventListener(type, listener) {
    if (type in this.listeners_) {
      if (this.listeners_[type].indexOf(listener) == -1) {
        this.listeners_[type].push(listener);
      }
    }
  }

  /**
   * Removes an event listener.
   *
   * @param {string} Event type.
   * @param {EventListener|function} An event listener or handler function.
   */
  removeEventListener(type, listener) {
    if (type in this.listeners_) {
      const index = this.listeners_[type].indexOf(listener);
      if (index != -1) {
        this.listeners_[type].splice(index, 1);
      }
    }
  }

  /**
   * Receive an event and pass it to the listener functions.
   *
   * @param {bitjs.archive.UnarchiveEvent} e
   * @private
   */
  handleWorkerEvent_(e) {
    if ((e instanceof bitjs.archive.UnarchiveEvent || e.type) &&
        this.listeners_[e.type] instanceof Array) {
      this.listeners_[e.type].forEach(function(listener) { listener(e) });
      if (e.type == bitjs.archive.UnarchiveEvent.Type.FINISH) {
        this.worker_.terminate();
      }
    } else {
      console.log(e);
    }
  }

  /**
   * Starts the unarchive in a separate Web Worker thread and returns immediately.
   */
  start() {
    const me = this;
    const scriptFileName = this.pathToBitJS_ + this.getScriptFileName();
    if (scriptFileName) {
      this.worker_ = new Worker(scriptFileName);

      this.worker_.onerror = function(e) {
        console.log('Worker error: message = ' + e.message);
        throw e;
      };

      this.worker_.onmessage = function(e) {
        if (typeof e.data == 'string') {
          // Just log any strings the workers pump our way.
          console.log(e.data);
        } else {
          // Assume that it is an UnarchiveEvent. Some browsers preserve the 'type'
          // so that instanceof UnarchiveEvent returns true, but others do not.
          me.handleWorkerEvent_(e.data);
        }
      };

      const ab = this.ab;
      this.worker_.postMessage({
        file: ab,
        logToConsole: false,
      });
      this.ab = null;
    }
  }

  /**
   * Adds more bytes to the unarchiver's Worker thread.
   */
  update(ab) {
    if (this.worker_) {
      this.worker_.postMessage({bytes: ab});
    }
  }

  /**
   * Terminates the Web Worker for this Unarchiver and returns immediately.
   */
  stop() {
    if (this.worker_) {
      this.worker_.terminate();
    }
  }
}

/**
 * Unzipper
 */
bitjs.archive.Unzipper = class extends bitjs.archive.Unarchiver {
  constructor(arrayBuffer, opt_pathToBitJS) {
    super(arrayBuffer, opt_pathToBitJS);
  }

  getScriptFileName() { return 'archive/unzip.js'; }
}

/**
 * Unrarrer
 */
bitjs.archive.Unrarrer = class extends bitjs.archive.Unarchiver {
  constructor(arrayBuffer, opt_pathToBitJS) {
    super(arrayBuffer, opt_pathToBitJS);
  }

  getScriptFileName() { return 'archive/unrar.js'; }
}

/**
 * Untarrer
 * @extends {bitjs.archive.Unarchiver}
 * @constructor
 */
bitjs.archive.Untarrer = class extends bitjs.archive.Unarchiver {
  constructor(arrayBuffer, opt_pathToBitJS) {
    super(arrayBuffer, opt_pathToBitJS);
  }

  getScriptFileName() { return 'archive/untar.js'; };
}

/**
 * Factory method that creates an unarchiver based on the byte signature found
 * in the arrayBuffer.
 * @param {ArrayBuffer} ab
 * @param {string=} opt_pathToBitJS Path to the unarchiver script files.
 * @return {bitjs.archive.Unarchiver}
 */
bitjs.archive.GetUnarchiver = function(ab, opt_pathToBitJS) {
  if (ab.byteLength < 10) {
    return null;
  }

  let unarchiver = null;
  const pathToBitJS = opt_pathToBitJS || '';
  const h = new Uint8Array(ab, 0, 10);

  if (h[0] == 0x52 && h[1] == 0x61 && h[2] == 0x72 && h[3] == 0x21) { // Rar!
    unarchiver = new bitjs.archive.Unrarrer(ab, pathToBitJS);
  } else if (h[0] == 0x50 && h[1] == 0x4B) { // PK (Zip)
    unarchiver = new bitjs.archive.Unzipper(ab, pathToBitJS);
  } else { // Try with tar
    unarchiver = new bitjs.archive.Untarrer(ab, pathToBitJS);
  }
  return unarchiver;
};
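Given an unarchiver created as in the earlier sketch, the new UnarchiveProgressEvent carries enough totals to drive a progress bar. A minimal listener sketch follows; this is assumed caller code, not part of this commit.

// Assumed caller code: compute an overall percentage from the progress event fields.
unarchiver.addEventListener(bitjs.archive.UnarchiveEvent.Type.PROGRESS, function(e) {
  if (e.totalUncompressedBytesInArchive > 0) {
    var percent = 100 * e.currentBytesUnarchived / e.totalUncompressedBytesInArchive;
    console.log('unpacking ' + e.currentFilename + ': ' + percent.toFixed(1) + '%');
  }
});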
cps/static/js/archive/rarvm.js  (new file, 0 → 100644, view file @ c0d136cc)
/**
 * rarvm.js
 *
 * Licensed under the MIT License
 *
 * Copyright(c) 2017 Google Inc.
 */

/**
 * CRC Implementation.
 */
const CRCTab = new Array(256).fill(0);

// Helper functions between signed and unsigned integers.

/**
 * -1 becomes 0xffffffff
 */
function fromSigned32ToUnsigned32(val) {
  return (val < 0) ? (val += 0x100000000) : val;
}

/**
 * 0xffffffff becomes -1
 */
function fromUnsigned32ToSigned32(val) {
  return (val >= 0x80000000) ? (val -= 0x100000000) : val;
}

/**
 * -1 becomes 0xff
 */
function fromSigned8ToUnsigned8(val) {
  return (val < 0) ? (val += 0x100) : val;
}

/**
 * 0xff becomes -1
 */
function fromUnsigned8ToSigned8(val) {
  return (val >= 0x80) ? (val -= 0x100) : val;
}

function InitCRC() {
  for (let i = 0; i < 256; ++i) {
    let c = i;
    for (let j = 0; j < 8; ++j) {
      // Read http://stackoverflow.com/questions/6798111/bitwise-operations-on-32-bit-unsigned-ints
      // for the bitwise operator issue (JS interprets operands as 32-bit signed
      // integers and we need to deal with unsigned ones here).
      c = ((c & 1) ? ((c >>> 1) ^ 0xEDB88320) : (c >>> 1)) >>> 0;
    }
    CRCTab[i] = c;
  }
}

/**
 * @param {number} startCRC
 * @param {Uint8Array} arr
 * @return {number}
 */
function CRC(startCRC, arr) {
  if (CRCTab[1] == 0) {
    InitCRC();
  }

  /*
  #if defined(LITTLE_ENDIAN) && defined(PRESENT_INT32) && defined(ALLOW_NOT_ALIGNED_INT)
  while (Size>0 && ((long)Data & 7))
  {
    StartCRC=CRCTab[(byte)(StartCRC^Data[0])]^(StartCRC>>8);
    Size--;
    Data++;
  }
  while (Size>=8)
  {
    StartCRC^=*(uint32 *)Data;
    StartCRC=CRCTab[(byte)StartCRC]^(StartCRC>>8);
    StartCRC=CRCTab[(byte)StartCRC]^(StartCRC>>8);
    StartCRC=CRCTab[(byte)StartCRC]^(StartCRC>>8);
    StartCRC=CRCTab[(byte)StartCRC]^(StartCRC>>8);
    StartCRC^=*(uint32 *)(Data+4);
    StartCRC=CRCTab[(byte)StartCRC]^(StartCRC>>8);
    StartCRC=CRCTab[(byte)StartCRC]^(StartCRC>>8);
    StartCRC=CRCTab[(byte)StartCRC]^(StartCRC>>8);
    StartCRC=CRCTab[(byte)StartCRC]^(StartCRC>>8);
    Data+=8;
    Size-=8;
  }
  #endif
  */

  for (let i = 0; i < arr.length; ++i) {
    const byte = ((startCRC ^ arr[i]) >>> 0) & 0xff;
    startCRC = (CRCTab[byte] ^ (startCRC >>> 8)) >>> 0;
  }

  return startCRC;
}

// ============================================================================================== //

/**
 * RarVM Implementation.
 */
const VM_MEMSIZE = 0x40000;
const VM_MEMMASK = (VM_MEMSIZE - 1);
const VM_GLOBALMEMADDR = 0x3C000;
const VM_GLOBALMEMSIZE = 0x2000;
const VM_FIXEDGLOBALSIZE = 64;
const MAXWINSIZE = 0x400000;
const MAXWINMASK = (MAXWINSIZE - 1);

/**
 */
const VM_Commands = {
  VM_MOV: 0, VM_CMP: 1, VM_ADD: 2, VM_SUB: 3, VM_JZ: 4, VM_JNZ: 5, VM_INC: 6, VM_DEC: 7,
  VM_JMP: 8, VM_XOR: 9, VM_AND: 10, VM_OR: 11, VM_TEST: 12, VM_JS: 13, VM_JNS: 14, VM_JB: 15,
  VM_JBE: 16, VM_JA: 17, VM_JAE: 18, VM_PUSH: 19, VM_POP: 20, VM_CALL: 21, VM_RET: 22, VM_NOT: 23,
  VM_SHL: 24, VM_SHR: 25, VM_SAR: 26, VM_NEG: 27, VM_PUSHA: 28, VM_POPA: 29, VM_PUSHF: 30, VM_POPF: 31,
  VM_MOVZX: 32, VM_MOVSX: 33, VM_XCHG: 34, VM_MUL: 35, VM_DIV: 36, VM_ADC: 37, VM_SBB: 38, VM_PRINT: 39,

  /*
  #ifdef VM_OPTIMIZE
  VM_MOVB, VM_MOVD, VM_CMPB, VM_CMPD,
  VM_ADDB, VM_ADDD, VM_SUBB, VM_SUBD, VM_INCB, VM_INCD, VM_DECB, VM_DECD,
  VM_NEGB, VM_NEGD,
  #endif
  */

  // TODO: This enum value would be much larger if VM_OPTIMIZE.
  VM_STANDARD: 40,
};

/**
 */
const VM_StandardFilters = {
  VMSF_NONE: 0, VMSF_E8: 1, VMSF_E8E9: 2, VMSF_ITANIUM: 3,
  VMSF_RGB: 4, VMSF_AUDIO: 5, VMSF_DELTA: 6, VMSF_UPCASE: 7,
};

/**
 */
const VM_Flags = {
  VM_FC: 1, VM_FZ: 2, VM_FS: 0x80000000,
};

/**
 */
const VM_OpType = {
  VM_OPREG: 0, VM_OPINT: 1, VM_OPREGMEM: 2, VM_OPNONE: 3,
};

/**
 * Finds the key that maps to a given value in an object. This function is useful in debugging
 * variables that use the above enums.
 * @param {Object} obj
 * @param {number} val
 * @return {string} The key/enum value as a string.
 */
function findKeyForValue(obj, val) {
  for (let key in obj) {
    if (obj[key] === val) {
      return key;
    }
  }
  return null;
}

function getDebugString(obj, val) {
  let s = 'Unknown.';
  if (obj === VM_Commands) {
    s = 'VM_Commands.';
  } else if (obj === VM_StandardFilters) {
    s = 'VM_StandardFilters.';
  } else if (obj === VM_Flags) {
    s = 'VM_OpType.';
  } else if (obj === VM_OpType) {
    s = 'VM_OpType.';
  }

  return s + findKeyForValue(obj, val);
}

/**
 */
class VM_PreparedOperand {
  constructor() {
    /** @type {VM_OpType} */
    this.Type;

    /** @type {number} */
    this.Data = 0;

    /** @type {number} */
    this.Base = 0;

    // TODO: In C++ this is a uint*
    /** @type {Array<number>} */
    this.Addr = null;
  };

  /** @return {string} */
  toString() {
    if (this.Type === null) {
      return 'Error: Type was null in VM_PreparedOperand';
    }
    return '{ ' +
        'Type: ' + getDebugString(VM_OpType, this.Type) +
        ', Data: ' + this.Data +
        ', Base: ' + this.Base +
        ' }';
  }
}

/**
 */
class VM_PreparedCommand {
  constructor() {
    /** @type {VM_Commands} */
    this.OpCode;

    /** @type {boolean} */
    this.ByteMode = false;

    /** @type {VM_PreparedOperand} */
    this.Op1 = new VM_PreparedOperand();

    /** @type {VM_PreparedOperand} */
    this.Op2 = new VM_PreparedOperand();
  }

  /** @return {string} */
  toString(indent) {
    if (this.OpCode === null) {
      return 'Error: OpCode was null in VM_PreparedCommand';
    }
    indent = indent || '';
    return indent + '{\n' +
        indent + ' OpCode: ' + getDebugString(VM_Commands, this.OpCode) + ',\n' +
        indent + ' ByteMode: ' + this.ByteMode + ',\n' +
        indent + ' Op1: ' + this.Op1.toString() + ',\n' +
        indent + ' Op2: ' + this.Op2.toString() + ',\n' +
        indent + '}';
  }
}

/**
 */
class VM_PreparedProgram {
  constructor() {
    /** @type {Array<VM_PreparedCommand>} */
    this.Cmd = [];

    /** @type {Array<VM_PreparedCommand>} */
    this.AltCmd = null;

    /** @type {Uint8Array} */
    this.GlobalData = new Uint8Array();

    /** @type {Uint8Array} */
    this.StaticData = new Uint8Array(); // static data contained in DB operators

    /** @type {Uint32Array} */
    this.InitR = new Uint32Array(7);

    /**
     * A pointer to bytes that have been filtered by a program.
     * @type {Uint8Array}
     */
    this.FilteredData = null;
  }

  /** @return {string} */
  toString() {
    let s = '{\n Cmd: [\n';
    for (let i = 0; i < this.Cmd.length; ++i) {
      s += this.Cmd[i].toString(' ') + ',\n';
    }
    s += '],\n';
    // TODO: Dump GlobalData, StaticData, InitR?
    s += ' }\n';
    return s;
  }
}

/**
 */
class UnpackFilter {
  constructor() {
    /** @type {number} */
    this.BlockStart = 0;

    /** @type {number} */
    this.BlockLength = 0;

    /** @type {number} */
    this.ExecCount = 0;

    /** @type {boolean} */
    this.NextWindow = false;

    // position of parent filter in Filters array used as prototype for filter
    // in PrgStack array. Not defined for filters in Filters array.
    /** @type {number} */
    this.ParentFilter = null;

    /** @type {VM_PreparedProgram} */
    this.Prg = new VM_PreparedProgram();
  }
}

const VMCF_OP0 = 0;
const VMCF_OP1 = 1;
const VMCF_OP2 = 2;
const VMCF_OPMASK = 3;
const VMCF_BYTEMODE = 4;
const VMCF_JUMP = 8;
const VMCF_PROC = 16;
const VMCF_USEFLAGS = 32;
const VMCF_CHFLAGS = 64;

const VM_CmdFlags = [
  /* VM_MOV   */ VMCF_OP2 | VMCF_BYTEMODE,
  /* VM_CMP   */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_ADD   */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_SUB   */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_JZ    */ VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS,
  /* VM_JNZ   */ VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS,
  /* VM_INC   */ VMCF_OP1 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_DEC   */ VMCF_OP1 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_JMP   */ VMCF_OP1 | VMCF_JUMP,
  /* VM_XOR   */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_AND   */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_OR    */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_TEST  */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_JS    */ VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS,
  /* VM_JNS   */ VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS,
  /* VM_JB    */ VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS,
  /* VM_JBE   */ VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS,
  /* VM_JA    */ VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS,
  /* VM_JAE   */ VMCF_OP1 | VMCF_JUMP | VMCF_USEFLAGS,
  /* VM_PUSH  */ VMCF_OP1,
  /* VM_POP   */ VMCF_OP1,
  /* VM_CALL  */ VMCF_OP1 | VMCF_PROC,
  /* VM_RET   */ VMCF_OP0 | VMCF_PROC,
  /* VM_NOT   */ VMCF_OP1 | VMCF_BYTEMODE,
  /* VM_SHL   */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_SHR   */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_SAR   */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_NEG   */ VMCF_OP1 | VMCF_BYTEMODE | VMCF_CHFLAGS,
  /* VM_PUSHA */ VMCF_OP0,
  /* VM_POPA  */ VMCF_OP0,
  /* VM_PUSHF */ VMCF_OP0 | VMCF_USEFLAGS,
  /* VM_POPF  */ VMCF_OP0 | VMCF_CHFLAGS,
  /* VM_MOVZX */ VMCF_OP2,
  /* VM_MOVSX */ VMCF_OP2,
  /* VM_XCHG  */ VMCF_OP2 | VMCF_BYTEMODE,
  /* VM_MUL   */ VMCF_OP2 | VMCF_BYTEMODE,
  /* VM_DIV   */ VMCF_OP2 | VMCF_BYTEMODE,
  /* VM_ADC   */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_USEFLAGS | VMCF_CHFLAGS,
  /* VM_SBB   */ VMCF_OP2 | VMCF_BYTEMODE | VMCF_USEFLAGS | VMCF_CHFLAGS,
  /* VM_PRINT */ VMCF_OP0,
];

/**
 */
class StandardFilterSignature {
  /**
   * @param {number} length
   * @param {number} crc
   * @param {VM_StandardFilters} type
   */
  constructor(length, crc, type) {
    /** @type {number} */
    this.Length = length;

    /** @type {number} */
    this.CRC = crc;

    /** @type {VM_StandardFilters} */
    this.Type = type;
  }
}

/**
 * @type {Array<StandardFilterSignature>}
 */
const StdList = [
  new StandardFilterSignature(53, 0xad576887, VM_StandardFilters.VMSF_E8),
  new StandardFilterSignature(57, 0x3cd7e57e, VM_StandardFilters.VMSF_E8E9),
  new StandardFilterSignature(120, 0x3769893f, VM_StandardFilters.VMSF_ITANIUM),
  new StandardFilterSignature(29, 0x0e06077d, VM_StandardFilters.VMSF_DELTA),
  new StandardFilterSignature(149, 0x1c2c5dc8, VM_StandardFilters.VMSF_RGB),
  new StandardFilterSignature(216, 0xbc85e701, VM_StandardFilters.VMSF_AUDIO),
  new StandardFilterSignature(40, 0x46b9c560, VM_StandardFilters.VMSF_UPCASE),
];

/**
 * @constructor
 */
class RarVM {
  constructor() {
    /** @private {Uint8Array} */
    this.mem_ = null;

    /** @private {Uint32Array<number>} */
    this.R_ = new Uint32Array(8);

    /** @private {number} */
    this.flags_ = 0;
  }

  /**
   * Initializes the memory of the VM.
   */
  init() {
    if (!this.mem_) {
      this.mem_ = new Uint8Array(VM_MEMSIZE);
    }
  }

  /**
   * @param {Uint8Array} code
   * @return {VM_StandardFilters}
   */
  isStandardFilter(code) {
    const codeCRC = (CRC(0xffffffff, code, code.length) ^ 0xffffffff) >>> 0;
    for (let i = 0; i < StdList.length; ++i) {
      if (StdList[i].CRC == codeCRC && StdList[i].Length == code.length) return StdList[i].Type;
    }

    return VM_StandardFilters.VMSF_NONE;
  }

  /**
   * @param {VM_PreparedOperand} op
   * @param {boolean} byteMode
   * @param {bitjs.io.BitStream} bstream A rtl bit stream.
   */
  decodeArg(op, byteMode, bstream) {
    const data = bstream.peekBits(16);
    if (data & 0x8000) {
      op.Type = VM_OpType.VM_OPREG; // Operand is register (R[0]..R[7])
      bstream.readBits(1); // 1 flag bit and...
      op.Data = bstream.readBits(3); // ... 3 register number bits
      op.Addr = [this.R_[op.Data]] // TODO &R[Op.Data] // Register address
    } else {
      if ((data & 0xc000) == 0) {
        op.Type = VM_OpType.VM_OPINT; // Operand is integer
        bstream.readBits(2); // 2 flag bits
        if (byteMode) {
          op.Data = bstream.readBits(8); // Byte integer.
        } else {
          op.Data = RarVM.readData(bstream); // 32 bit integer.
        }
      } else {
        // Operand is data addressed by register data, base address or both.
        op.Type = VM_OpType.VM_OPREGMEM;
        if ((data & 0x2000) == 0) {
          bstream.readBits(3); // 3 flag bits
          // Base address is zero, just use the address from register.
          op.Data = bstream.readBits(3); // (Data>>10)&7
          op.Addr = [this.R_[op.Data]]; // TODO &R[op.Data]
          op.Base = 0;
        } else {
          bstream.readBits(4); // 4 flag bits
          if ((data & 0x1000) == 0) {
            // Use both register and base address.
            op.Data = bstream.readBits(3);
            op.Addr = [this.R_[op.Data]]; // TODO &R[op.Data]
          } else {
            // Use base address only. Access memory by fixed address.
            op.Data = 0;
          }
          op.Base = RarVM.readData(bstream); // Read base address.
        }
      }
    }
  }

  /**
   * @param {VM_PreparedProgram} prg
   */
  execute(prg) {
    this.R_.set(prg.InitR);

    const globalSize = Math.min(prg.GlobalData.length, VM_GLOBALMEMSIZE);
    if (globalSize) {
      this.mem_.set(prg.GlobalData.subarray(0, globalSize), VM_GLOBALMEMADDR);
    }

    const staticSize = Math.min(prg.StaticData.length, VM_GLOBALMEMSIZE - globalSize);
    if (staticSize) {
      this.mem_.set(prg.StaticData.subarray(0, staticSize), VM_GLOBALMEMADDR + globalSize);
    }

    this.R_[7] = VM_MEMSIZE;
    this.flags_ = 0;

    const preparedCodes = prg.AltCmd ? prg.AltCmd : prg.Cmd;
    if (prg.Cmd.length > 0 && !this.executeCode(preparedCodes)) {
      // Invalid VM program. Let's replace it with 'return' command.
      preparedCode.OpCode = VM_Commands.VM_RET;
    }

    const dataView = new DataView(this.mem_.buffer, VM_GLOBALMEMADDR);
    let newBlockPos = dataView.getUint32(0x20, true /* little endian */) & VM_MEMMASK;
    const newBlockSize = dataView.getUint32(0x1c, true /* little endian */) & VM_MEMMASK;
    if (newBlockPos + newBlockSize >= VM_MEMSIZE) {
      newBlockPos = newBlockSize = 0;
    }
    prg.FilteredData = this.mem_.subarray(newBlockPos, newBlockPos + newBlockSize);

    prg.GlobalData = new Uint8Array(0);

    const dataSize = Math.min(dataView.getUint32(0x30), (VM_GLOBALMEMSIZE - VM_FIXEDGLOBALSIZE));
    if (dataSize != 0) {
      const len = dataSize + VM_FIXEDGLOBALSIZE;
      prg.GlobalData = new Uint8Array(len);
      prg.GlobalData.set(mem.subarray(VM_GLOBALMEMADDR, VM_GLOBALMEMADDR + len));
    }
  }

  /**
   * @param {Array<VM_PreparedCommand>} preparedCodes
   * @return {boolean}
   */
  executeCode(preparedCodes) {
    let codeIndex = 0;
    let cmd = preparedCodes[codeIndex];

    // TODO: Why is this an infinite loop instead of just returning
    // when a VM_RET is hit?
    while (1) {
      switch (cmd.OpCode) {
        case VM_Commands.VM_RET:
          if (this.R_[7] >= VM_MEMSIZE) {
            return true;
          }

          //SET_IP(GET_VALUE(false,(uint *)&Mem[R[7] & VM_MEMMASK]));
          this.R_[7] += 4;
          continue;

        case VM_Commands.VM_STANDARD:
          this.executeStandardFilter(cmd.Op1.Data);
          break;

        default:
          console.error('RarVM OpCode not supported: ' + getDebugString(VM_Commands, cmd.OpCode));
          break;
      } // switch (cmd.OpCode)

      codeIndex++;
      cmd = preparedCodes[codeIndex];
    }
  }

  /**
   * @param {number} filterType
   */
  executeStandardFilter(filterType) {
    switch (filterType) {
      case VM_StandardFilters.VMSF_RGB: {
        const dataSize = this.R_[4];
        const width = this.R_[0] - 3;
        const posR = this.R_[1];
        const Channels = 3;
        let srcOffset = 0;
        let destOffset = dataSize;

        // byte *SrcData=Mem,*DestData=SrcData+DataSize;
        // SET_VALUE(false,&Mem[VM_GLOBALMEMADDR+0x20],DataSize);
        const dataView = new DataView(this.mem_.buffer, VM_GLOBALMEMADDR /* offset */);
        dataView.setUint32(0x20 /* byte offset */, dataSize /* value */, true /* little endian */);

        if (dataSize >= (VM_GLOBALMEMADDR / 2) || posR < 0) {
          break;
        }

        for (let curChannel = 0; curChannel < Channels; ++curChannel) {
          let prevByte = 0;

          for (let i = curChannel; i < dataSize; i += Channels) {
            let predicted;
            const upperPos = i - width;
            if (upperPos >= 3) {
              const upperByte = this.mem_[destOffset + upperPos];
              const upperLeftByte = this.mem_[destOffset + upperPos - 3];
              predicted = prevByte + upperByte - upperLeftByte;

              const pa = Math.abs(predicted - prevByte);
              const pb = Math.abs(predicted - upperByte);
              const pc = Math.abs(predicted - upperLeftByte);
              if (pa <= pb && pa <= pc) {
                predicted = prevByte;
              } else if (pb <= pc) {
                predicted = upperByte;
              } else {
                predicted = upperLeftByte;
              }
            } else {
              predicted = prevByte;
            }

            //DestData[I]=PrevByte=(byte)(Predicted-*(SrcData++));
            prevByte = (predicted - this.mem_[srcOffset++]) & 0xff;
            this.mem_[destOffset + i] = prevByte;
          }
        }

        for (let i = posR, border = dataSize - 2; i < border; i += 3) {
          const g = this.mem_[destOffset + i + 1];
          this.mem_[destOffset + i] += g;
          this.mem_[destOffset + i + 2] += g;
        }

        break;
      }

      // The C++ version of this standard filter uses an odd mixture of
      // signed and unsigned integers, bytes and various casts. Careful!
      case VM_StandardFilters.VMSF_AUDIO: {
        const dataSize = this.R_[4];
        const channels = this.R_[0];
        let srcOffset = 0;
        let destOffset = dataSize;

        //SET_VALUE(false,&Mem[VM_GLOBALMEMADDR+0x20],DataSize);
        const dataView = new DataView(this.mem_.buffer, VM_GLOBALMEMADDR);
        dataView.setUint32(0x20 /* byte offset */, dataSize /* value */, true /* little endian */);

        if (dataSize >= VM_GLOBALMEMADDR / 2) {
          break;
        }

        for (let curChannel = 0; curChannel < channels; ++curChannel) {
          let prevByte = 0; // uint
          let prevDelta = 0; // uint
          let dif = [0, 0, 0, 0, 0, 0, 0];
          let d1 = 0, d2 = 0, d3; // ints
          let k1 = 0, k2 = 0, k3 = 0; // ints

          for (var i = curChannel, byteCount = 0; i < dataSize; i += channels, ++byteCount) {
            d3 = d2;
            d2 = fromUnsigned32ToSigned32(prevDelta - d1);
            d1 = fromUnsigned32ToSigned32(prevDelta);

            let predicted = fromSigned32ToUnsigned32(8 * prevByte + k1 * d1 + k2 * d2 + k3 * d3); // uint
            predicted = (predicted >>> 3) & 0xff;

            let curByte = this.mem_[srcOffset++]; // uint

            // Predicted-=CurByte;
            predicted = fromSigned32ToUnsigned32(predicted - curByte);
            this.mem_[destOffset + i] = (predicted & 0xff);

            // PrevDelta=(signed char)(Predicted-PrevByte);
            // where Predicted, PrevByte, PrevDelta are all unsigned int (32)
            // casting this subtraction to a (signed char) is kind of invalid
            // but it does the following:
            // - do the subtraction
            // - get the bottom 8 bits of the result
            // - if it was >= 0x80, then the value is negative (subtract 0x100)
            // - if the value is now negative, add 0x100000000 to make unsigned
            //
            // Example:
            // predicted = 101
            // prevByte = 4294967158
            // (predicted - prevByte) = -4294967057
            // take lower 8 bits: 1110 1111 = 239
            // since > 127, subtract 256 = -17
            // since < 0, add 0x100000000 = 4294967279
            prevDelta = fromSigned32ToUnsigned32(fromUnsigned8ToSigned8((predicted - prevByte) & 0xff));
            prevByte = predicted;

            // int D=((signed char)CurByte)<<3;
            let curByteAsSignedChar = fromUnsigned8ToSigned8(curByte); // signed char
            let d = (curByteAsSignedChar << 3);

            dif[0] += Math.abs(d);
            dif[1] += Math.abs(d - d1);
            dif[2] += Math.abs(d + d1);
            dif[3] += Math.abs(d - d2);
            dif[4] += Math.abs(d + d2);
            dif[5] += Math.abs(d - d3);
            dif[6] += Math.abs(d + d3);

            if ((byteCount & 0x1f) == 0) {
              let minDif = dif[0], numMinDif = 0;
              dif[0] = 0;

              for (let j = 1; j < 7; ++j) {
                if (dif[j] < minDif) {
                  minDif = dif[j];
                  numMinDif = j;
                }
                dif[j] = 0;
              }

              switch (numMinDif) {
                case 1: if (k1 >= -16) k1--; break;
                case 2: if (k1 < 16) k1++; break;
                case 3: if (k2 >= -16) k2--; break;
                case 4: if (k2 < 16) k2++; break;
                case 5: if (k3 >= -16) k3--; break;
                case 6: if (k3 < 16) k3++; break;
              }
            }
          }
        }

        break;
      }

      case VM_StandardFilters.VMSF_DELTA: {
        const dataSize = this.R_[4];
        const channels = this.R_[0];
        let srcPos = 0;
        const border = dataSize * 2;

        //SET_VALUE(false,&Mem[VM_GLOBALMEMADDR+0x20],DataSize);
        const dataView = new DataView(this.mem_.buffer, VM_GLOBALMEMADDR);
        dataView.setUint32(0x20 /* byte offset */, dataSize /* value */, true /* little endian */);

        if (dataSize >= VM_GLOBALMEMADDR / 2) {
          break;
        }

        // Bytes from same channels are grouped to continual data blocks,
        // so we need to place them back to their interleaving positions.
        for (let curChannel = 0; curChannel < channels; ++curChannel) {
          let prevByte = 0;
          for (let destPos = dataSize + curChannel; destPos < border; destPos += channels) {
            prevByte = (prevByte - this.mem_[srcPos++]) & 0xff;
            this.mem_[destPos] = prevByte;
          }
        }

        break;
      }

      default:
        console.error('RarVM Standard Filter not supported: ' + getDebugString(VM_StandardFilters, filterType));
        break;
    }
  }

  /**
   * @param {Uint8Array} code
   * @param {VM_PreparedProgram} prg
   */
  prepare(code, prg) {
    let codeSize = code.length;

    //InitBitInput();
    //memcpy(InBuf,Code,Min(CodeSize,BitInput::MAX_SIZE));
    const bstream = new bitjs.io.BitStream(code.buffer, true /* rtl */);

    // Calculate the single byte XOR checksum to check validity of VM code.
    let xorSum = 0;
    for (let i = 1; i < codeSize; ++i) {
      xorSum ^= code[i];
    }

    bstream.readBits(8);

    prg.Cmd = [];
    // TODO: Is this right? I don't see it being done in rarvm.cpp.
    // VM code is valid if equal.
    if (xorSum == code[0]) {
      const filterType = this.isStandardFilter(code);
      if (filterType != VM_StandardFilters.VMSF_NONE) {
        // VM code is found among standard filters.
        const curCmd = new VM_PreparedCommand();
        prg.Cmd.push(curCmd);

        curCmd.OpCode = VM_Commands.VM_STANDARD;
        curCmd.Op1.Data = filterType;
        // TODO: Addr=&CurCmd->Op1.Data
        curCmd.Op1.Addr = [curCmd.Op1.Data];
        curCmd.Op2.Addr = [null]; // &CurCmd->Op2.Data;
        curCmd.Op1.Type = VM_OpType.VM_OPNONE;
        curCmd.Op2.Type = VM_OpType.VM_OPNONE;
        codeSize = 0;
      }

      const dataFlag = bstream.readBits(1);

      // Read static data contained in DB operators. This data cannot be
      // changed, it is a part of VM code, not a filter parameter.
      if (dataFlag & 0x8000) {
        const dataSize = RarVM.readData(bstream) + 1;
        // TODO: This accesses the byte pointer of the bstream directly. Is that ok?
        for (let i = 0; i < bstream.bytePtr < codeSize && i < dataSize; ++i) {
          // Append a byte to the program's static data.
          const newStaticData = new Uint8Array(prg.StaticData.length + 1);
          newStaticData.set(prg.StaticData);
          newStaticData[newStaticData.length - 1] = bstream.readBits(8);
          prg.StaticData = newStaticData;
        }
      }

      while (bstream.bytePtr < codeSize) {
        const curCmd = new VM_PreparedCommand();
        prg.Cmd.push(curCmd); // Prg->Cmd.Add(1)

        const flag = bstream.peekBits(1);
        if (!flag) { // (Data&0x8000)==0
          curCmd.OpCode = bstream.readBits(4);
        } else {
          curCmd.OpCode = (bstream.readBits(6) - 24);
        }

        if (VM_CmdFlags[curCmd.OpCode] & VMCF_BYTEMODE) {
          curCmd.ByteMode = (bstream.readBits(1) != 0);
        } else {
          curCmd.ByteMode = 0;
        }

        curCmd.Op1.Type = VM_OpType.VM_OPNONE;
        curCmd.Op2.Type = VM_OpType.VM_OPNONE;

        const opNum = (VM_CmdFlags[curCmd.OpCode] & VMCF_OPMASK);
        curCmd.Op1.Addr = null;
        curCmd.Op2.Addr = null;
        if (opNum > 0) {
          this.decodeArg(curCmd.Op1, curCmd.ByteMode, bstream); // reading the first operand
          if (opNum == 2) {
            this.decodeArg(curCmd.Op2, curCmd.ByteMode, bstream); // reading the second operand
          } else {
            if (curCmd.Op1.Type == VM_OpType.VM_OPINT &&
                (VM_CmdFlags[curCmd.OpCode] & (VMCF_JUMP | VMCF_PROC))) {
              // Calculating jump distance.
              let distance = curCmd.Op1.Data;
              if (distance >= 256) {
                distance -= 256;
              } else {
                if (distance >= 136) {
                  distance -= 264;
                } else {
                  if (distance >= 16) {
                    distance -= 8;
                  } else {
                    if (distance >= 8) {
                      distance -= 16;
                    }
                  }
                }
                distance += prg.Cmd.length;
              }
              curCmd.Op1.Data = distance;
            }
          }
        } // if (OpNum>0)
      } // while ((uint)InAddr<CodeSize)
    } // if (XorSum==Code[0])

    const curCmd = new VM_PreparedCommand();
    prg.Cmd.push(curCmd);
    curCmd.OpCode = VM_Commands.VM_RET;
    // TODO: Addr=&CurCmd->Op1.Data
    curCmd.Op1.Addr = [curCmd.Op1.Data];
    curCmd.Op2.Addr = [curCmd.Op2.Data];
    curCmd.Op1.Type = VM_OpType.VM_OPNONE;
    curCmd.Op2.Type = VM_OpType.VM_OPNONE;

    // If operand 'Addr' field has not been set by DecodeArg calls above,
    // let's set it to point to operand 'Data' field. It is necessary for
    // VM_OPINT type operands (usual integers) or maybe if something was
    // not set properly for other operands. 'Addr' field is required
    // for quicker addressing of operand data.
    for (let i = 0; i < prg.Cmd.length; ++i) {
      const cmd = prg.Cmd[i];
      if (cmd.Op1.Addr == null) {
        cmd.Op1.Addr = [cmd.Op1.Data];
      }
      if (cmd.Op2.Addr == null) {
        cmd.Op2.Addr = [cmd.Op2.Data];
      }
    }

    /*
    #ifdef VM_OPTIMIZE
    if (CodeSize!=0)
      Optimize(Prg);
    #endif
    */
  }

  /**
   * @param {Uint8Array} arr The byte array to set a value in.
   * @param {number} value The unsigned 32-bit value to set.
   * @param {number} offset Offset into arr to start setting the value, defaults to 0.
   */
  setLowEndianValue(arr, value, offset) {
    const i = offset || 0;
    arr[i] = value & 0xff;
    arr[i + 1] = (value >>> 8) & 0xff;
    arr[i + 2] = (value >>> 16) & 0xff;
    arr[i + 3] = (value >>> 24) & 0xff;
  }

  /**
   * Sets a number of bytes of the VM memory at the given position from a
   * source buffer of bytes.
   * @param {number} pos The position in the VM memory to start writing to.
   * @param {Uint8Array} buffer The source buffer of bytes.
   * @param {number} dataSize The number of bytes to set.
   */
  setMemory(pos, buffer, dataSize) {
    if (pos < VM_MEMSIZE) {
      const numBytes = Math.min(dataSize, VM_MEMSIZE - pos);
      for (let i = 0; i < numBytes; ++i) {
        this.mem_[pos + i] = buffer[i];
      }
    }
  }

  /**
   * Static function that reads in the next set of bits for the VM
   * (might return 4, 8, 16 or 32 bits).
   * @param {bitjs.io.BitStream} bstream A RTL bit stream.
   * @return {number} The value of the bits read.
   */
  static readData(bstream) {
    // Read in the first 2 bits.
    const flags = bstream.readBits(2);
    switch (flags) { // Data&0xc000
      // Return the next 4 bits.
      case 0:
        return bstream.readBits(4); // (Data>>10)&0xf

      case 1: // 0x4000
        // 0x3c00 => 0011 1100 0000 0000
        if (bstream.peekBits(4) == 0) { // (Data&0x3c00)==0
          // Skip the 4 zero bits.
          bstream.readBits(4);
          // Read in the next 8 and pad with 1s to 32 bits.
          return (0xffffff00 | bstream.readBits(8)) >>> 0; // ((Data>>2)&0xff)
        }

        // Else, read in the next 8.
        return bstream.readBits(8);

      // Read in the next 16.
      case 2: // 0x8000
        const val = bstream.getBits();
        bstream.readBits(16);
        return val; //bstream.readBits(16);

      // case 3
      default:
        return (bstream.readBits(16) << 16) | bstream.readBits(16);
    }
  }
}

// ============================================================================================== //
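isStandardFilter() above recognizes filters by taking a CRC-32 of the VM code and comparing it against StdList. The same call pattern is shown standalone below; this is a sketch with made-up bytes, not code from the commit (note CRC() itself takes two parameters, so the extra length argument passed by isStandardFilter() is simply ignored).

// Sketch: compute a CRC-32 of a byte array the same way isStandardFilter() does.
const sampleBytes = new Uint8Array([0x52, 0x61, 0x72, 0x21]); // made-up sample input
const crc32 = (CRC(0xffffffff, sampleBytes) ^ 0xffffffff) >>> 0; // final XOR, kept unsigned
console.log(crc32.toString(16));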
cps/static/js/archive/unrar.js  (new file, 0 → 100644, view file @ c0d136cc)
/**
* unrar.js
*
* Licensed under the MIT License
*
* Copyright(c) 2011 Google Inc.
* Copyright(c) 2011 antimatter15
*/
// TODO: Rewrite the RarLocalHeader parsing to use a ByteStream instead
// of a BitStream so that it throws properly when not enough bytes are
// present.
// This file expects to be invoked as a Worker (see onmessage below).
importScripts
(
'../io/bitstream.js'
);
importScripts
(
'../io/bytestream.js'
);
importScripts
(
'../io/bytebuffer.js'
);
importScripts
(
'archive.js'
);
importScripts
(
'rarvm.js'
);
const
UnarchiveState
=
{
NOT_STARTED
:
0
,
UNARCHIVING
:
1
,
WAITING
:
2
,
FINISHED
:
3
,
};
// State - consider putting these into a class.
let
unarchiveState
=
UnarchiveState
.
NOT_STARTED
;
let
bytestream
=
null
;
let
allLocalFiles
=
null
;
let
logToConsole
=
false
;
// Progress variables.
let
currentFilename
=
''
;
let
currentFileNumber
=
0
;
let
currentBytesUnarchivedInFile
=
0
;
let
currentBytesUnarchived
=
0
;
let
totalUncompressedBytesInArchive
=
0
;
let
totalFilesInArchive
=
0
;
// Helper functions.
const
info
=
function
(
str
)
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveInfoEvent
(
str
));
};
const
err
=
function
(
str
)
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveErrorEvent
(
str
));
};
const
postProgress
=
function
()
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveProgressEvent
(
currentFilename
,
currentFileNumber
,
currentBytesUnarchivedInFile
,
currentBytesUnarchived
,
totalUncompressedBytesInArchive
,
totalFilesInArchive
,
parseInt
(
bytestream
.
getNumBytesRead
(),
10
),
));
};
// shows a byte value as its hex representation
const
nibble
=
'0123456789ABCDEF'
;
const
byteValueToHexString
=
function
(
num
)
{
return
nibble
[
num
>>
4
]
+
nibble
[
num
&
0xF
];
};
const
twoByteValueToHexString
=
function
(
num
)
{
return
nibble
[(
num
>>
12
)
&
0xF
]
+
nibble
[(
num
>>
8
)
&
0xF
]
+
nibble
[(
num
>>
4
)
&
0xF
]
+
nibble
[
num
&
0xF
];
};
// Volume Types
const
MARK_HEAD
=
0x72
;
const
MAIN_HEAD
=
0x73
;
const
FILE_HEAD
=
0x74
;
const
COMM_HEAD
=
0x75
;
const
AV_HEAD
=
0x76
;
const
SUB_HEAD
=
0x77
;
const
PROTECT_HEAD
=
0x78
;
const
SIGN_HEAD
=
0x79
;
const
NEWSUB_HEAD
=
0x7a
;
const
ENDARC_HEAD
=
0x7b
;
// ============================================================================================== //
/**
*/
class
RarVolumeHeader
{
/**
* @param {bitjs.io.ByteStream} bstream
*/
constructor
(
bstream
)
{
let
headBytesRead
=
0
;
// byte 1,2
this
.
crc
=
bstream
.
readNumber
(
2
);
// byte 3
this
.
headType
=
bstream
.
readNumber
(
1
);
// Get flags
// bytes 4,5
this
.
flags
=
{};
this
.
flags
.
value
=
bstream
.
readNumber
(
2
);
const
flagsValue
=
this
.
flags
.
value
;
switch
(
this
.
headType
)
{
case
MAIN_HEAD
:
this
.
flags
.
MHD_VOLUME
=
!!
(
flagsValue
&
0x01
);
this
.
flags
.
MHD_COMMENT
=
!!
(
flagsValue
&
0x02
);
this
.
flags
.
MHD_LOCK
=
!!
(
flagsValue
&
0x04
);
this
.
flags
.
MHD_SOLID
=
!!
(
flagsValue
&
0x08
);
this
.
flags
.
MHD_PACK_COMMENT
=
!!
(
flagsValue
&
0x10
);
this
.
flags
.
MHD_NEWNUMBERING
=
this
.
flags
.
MHD_PACK_COMMENT
;
this
.
flags
.
MHD_AV
=
!!
(
flagsValue
&
0x20
);
this
.
flags
.
MHD_PROTECT
=
!!
(
flagsValue
&
0x40
);
this
.
flags
.
MHD_PASSWORD
=
!!
(
flagsValue
&
0x80
);
this
.
flags
.
MHD_FIRSTVOLUME
=
!!
(
flagsValue
&
0x100
);
this
.
flags
.
MHD_ENCRYPTVER
=
!!
(
flagsValue
&
0x200
);
//bstream.readBits(6); // unused
break
;
case
FILE_HEAD
:
this
.
flags
.
LHD_SPLIT_BEFORE
=
!!
(
flagsValue
&
0x01
);
this
.
flags
.
LHD_SPLIT_AFTER
=
!!
(
flagsValue
&
0x02
);
this
.
flags
.
LHD_PASSWORD
=
!!
(
flagsValue
&
0x04
);
this
.
flags
.
LHD_COMMENT
=
!!
(
flagsValue
&
0x08
);
this
.
flags
.
LHD_SOLID
=
!!
(
flagsValue
&
0x10
);
// 3 bits unused
this
.
flags
.
LHD_LARGE
=
!!
(
flagsValue
&
0x100
);
this
.
flags
.
LHD_UNICODE
=
!!
(
flagsValue
&
0x200
);
this
.
flags
.
LHD_SALT
=
!!
(
flagsValue
&
0x400
);
this
.
flags
.
LHD_VERSION
=
!!
(
flagsValue
&
0x800
);
this
.
flags
.
LHD_EXTTIME
=
!!
(
flagsValue
&
0x1000
);
this
.
flags
.
LHD_EXTFLAGS
=
!!
(
flagsValue
&
0x2000
);
// 2 bits unused
//info(' LHD_SPLIT_BEFORE = ' + this.flags.LHD_SPLIT_BEFORE);
break
;
default
:
break
;
}
// byte 6,7
this
.
headSize
=
bstream
.
readNumber
(
2
);
headBytesRead
+=
7
;
switch
(
this
.
headType
)
{
case
MAIN_HEAD
:
this
.
highPosAv
=
bstream
.
readNumber
(
2
);
this
.
posAv
=
bstream
.
readNumber
(
4
);
headBytesRead
+=
6
;
if
(
this
.
flags
.
MHD_ENCRYPTVER
)
{
this
.
encryptVer
=
bstream
.
readNumber
(
1
);
headBytesRead
+=
1
;
}
//info('Found MAIN_HEAD with highPosAv=' + this.highPosAv + ', posAv=' + this.posAv);
break
;
case
FILE_HEAD
:
this
.
packSize
=
bstream
.
readNumber
(
4
);
this
.
unpackedSize
=
bstream
.
readNumber
(
4
);
this
.
hostOS
=
bstream
.
readNumber
(
1
);
this
.
fileCRC
=
bstream
.
readNumber
(
4
);
this
.
fileTime
=
bstream
.
readNumber
(
4
);
this
.
unpVer
=
bstream
.
readNumber
(
1
);
this
.
method
=
bstream
.
readNumber
(
1
);
this
.
nameSize
=
bstream
.
readNumber
(
2
);
this
.
fileAttr
=
bstream
.
readNumber
(
4
);
headBytesRead
+=
25
;
if
(
this
.
flags
.
LHD_LARGE
)
{
//info('Warning: Reading in LHD_LARGE 64-bit size values');
this
.
HighPackSize
=
bstream
.
readNumber
(
4
);
this
.
HighUnpSize
=
bstream
.
readNumber
(
4
);
headBytesRead
+=
8
;
}
else
{
this
.
HighPackSize
=
0
;
this
.
HighUnpSize
=
0
;
if
(
this
.
unpackedSize
==
0xffffffff
)
{
this
.
HighUnpSize
=
0x7fffffff
this
.
unpackedSize
=
0xffffffff
;
}
}
this
.
fullPackSize
=
0
;
this
.
fullUnpackSize
=
0
;
this
.
fullPackSize
|=
this
.
HighPackSize
;
this
.
fullPackSize
<<=
32
;
this
.
fullPackSize
|=
this
.
packSize
;
// read in filename
// TODO: Use readString?
this
.
filename
=
bstream
.
readBytes
(
this
.
nameSize
);
headBytesRead
+=
this
.
nameSize
;
let
_s
=
''
;
for
(
let
_i
=
0
;
_i
<
this
.
filename
.
length
;
_i
++
)
{
_s
+=
String
.
fromCharCode
(
this
.
filename
[
_i
]);
}
this
.
filename
=
_s
;
if
(
this
.
flags
.
LHD_SALT
)
{
//info('Warning: Reading in 64-bit salt value');
this
.
salt
=
bstream
.
readBytes
(
8
);
// 8 bytes
headBytesRead
+=
8
;
}
if
(
this
.
flags
.
LHD_EXTTIME
)
{
// 16-bit flags
const
extTimeFlags
=
bstream
.
readNumber
(
2
);
headBytesRead
+=
2
;
// this is adapted straight out of arcread.cpp, Archive::ReadHeader()
for
(
let
I
=
0
;
I
<
4
;
++
I
)
{
const
rmode
=
extTimeFlags
>>
((
3
-
I
)
*
4
);
if
((
rmode
&
8
)
==
0
)
{
continue
;
}
if
(
I
!=
0
)
{
bstream
.
readBytes
(
2
);
headBytesRead
+=
2
;
}
const
count
=
(
rmode
&
3
);
for
(
let
J
=
0
;
J
<
count
;
++
J
)
{
bstream
.
readNumber
(
1
);
headBytesRead
+=
1
;
}
}
}
if
(
this
.
flags
.
LHD_COMMENT
)
{
//info('Found a LHD_COMMENT');
}
if
(
headBytesRead
<
this
.
headSize
)
{
bstream
.
readBytes
(
this
.
headSize
-
headBytesRead
);
}
break
;
case
ENDARC_HEAD
:
break
;
default
:
if
(
logToConsole
)
{
info
(
'Found a header of type 0x'
+
byteValueToHexString
(
this
.
headType
));
}
// skip the rest of the header bytes (for now)
bstream
.
readBytes
(
this
.
headSize
-
7
);
break
;
}
}
dump
()
{
info
(
' crc='
+
this
.
crc
);
info
(
' headType='
+
this
.
headType
);
info
(
' flags='
+
twoByteValueToHexString
(
this
.
flags
.
value
));
info
(
' headSize='
+
this
.
headSize
);
if
(
this
.
headType
==
FILE_HEAD
)
{
info
(
'Found FILE_HEAD with packSize='
+
this
.
packSize
+
', unpackedSize= '
+
this
.
unpackedSize
+
', hostOS='
+
this
.
hostOS
+
', unpVer='
+
this
.
unpVer
+
', method='
+
this
.
method
+
', filename='
+
this
.
filename
);
}
}
}
const BLOCK_LZ = 0;
const BLOCK_PPM = 1;

const rLDecode = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224];
const rLBits = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5];
const rDBitLengthCounts = [4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 14, 0, 12];
const rSDDecode = [0, 4, 8, 16, 32, 64, 128, 192];
const rSDBits = [2, 2, 3, 4, 5, 6, 6, 6];
const rDDecode = [0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576, 32768, 49152, 65536, 98304, 131072, 196608, 262144, 327680, 393216, 458752, 524288, 589824, 655360, 720896, 786432, 851968, 917504, 983040];
const rDBits = [0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16];
const rLOW_DIST_REP_COUNT = 16;

const rNC = 299;
const rDC = 60;
const rLDC = 17;
const rRC = 28;
const rBC = 20;
const rHUFF_TABLE_SIZE = (rNC + rDC + rRC + rLDC);

const UnpOldTable = new Array(rHUFF_TABLE_SIZE);

const BD = { //bitdecode
  DecodeLen: new Array(16),
  DecodePos: new Array(16),
  DecodeNum: new Array(rBC),
};
const LD = { //litdecode
  DecodeLen: new Array(16),
  DecodePos: new Array(16),
  DecodeNum: new Array(rNC),
};
const DD = { //distdecode
  DecodeLen: new Array(16),
  DecodePos: new Array(16),
  DecodeNum: new Array(rDC),
};
const LDD = { //low dist decode
  DecodeLen: new Array(16),
  DecodePos: new Array(16),
  DecodeNum: new Array(rLDC),
};
const RD = { //rep decode
  DecodeLen: new Array(16),
  DecodePos: new Array(16),
  DecodeNum: new Array(rRC),
};

/**
 * @type {Array<bitjs.io.ByteBuffer>}
 */
const rOldBuffers = [];

/**
 * The current buffer we are unpacking to.
 * @type {bitjs.io.ByteBuffer}
 */
let rBuffer;

/**
 * The buffer of the final bytes after filtering (only used in Unpack29).
 * @type {bitjs.io.ByteBuffer}
 */
let wBuffer;

/**
 * In unpack.cpp, UnpPtr keeps track of what bytes have been unpacked
 * into the Window buffer and WrPtr keeps track of what bytes have been
 * actually written to disk after the unpacking and optional filtering
 * has been done.
 *
 * In our case, rBuffer is the buffer for the unpacked bytes and wBuffer is
 * the final output bytes.
 */

/**
 * Read in Huffman tables for RAR
 * @param {bitjs.io.BitStream} bstream
 */
function RarReadTables(bstream) {
  const BitLength = new Array(rBC);
  const Table = new Array(rHUFF_TABLE_SIZE);

  // before we start anything we need to get byte-aligned
  bstream.readBits((8 - bstream.bitPtr) & 0x7);

  if (bstream.readBits(1)) {
    info('Error!  PPM not implemented yet');
    return;
  }

  if (!bstream.readBits(1)) {
    // discard old table
    for (let i = UnpOldTable.length; i--;) {
      UnpOldTable[i] = 0;
    }
  }

  // read in bit lengths
  for (let I = 0; I < rBC; ++I) {
    const Length = bstream.readBits(4);
    if (Length == 15) {
      let ZeroCount = bstream.readBits(4);
      if (ZeroCount == 0) {
        BitLength[I] = 15;
      } else {
        ZeroCount += 2;
        while (ZeroCount-- > 0 && I < rBC) {
          BitLength[I++] = 0;
        }
        --I;
      }
    } else {
      BitLength[I] = Length;
    }
  }

  // now all 20 bit lengths are obtained, we construct the Huffman Table:
  RarMakeDecodeTables(BitLength, 0, BD, rBC);

  const TableSize = rHUFF_TABLE_SIZE;
  for (let i = 0; i < TableSize;) {
    const num = RarDecodeNumber(bstream, BD);
    if (num < 16) {
      Table[i] = (num + UnpOldTable[i]) & 0xf;
      i++;
    } else if (num < 18) {
      let N = (num == 16) ? (bstream.readBits(3) + 3) : (bstream.readBits(7) + 11);
      while (N-- > 0 && i < TableSize) {
        Table[i] = Table[i - 1];
        i++;
      }
    } else {
      let N = (num == 18) ? (bstream.readBits(3) + 3) : (bstream.readBits(7) + 11);
      while (N-- > 0 && i < TableSize) {
        Table[i++] = 0;
      }
    }
  }

  RarMakeDecodeTables(Table, 0, LD, rNC);
  RarMakeDecodeTables(Table, rNC, DD, rDC);
  RarMakeDecodeTables(Table, rNC + rDC, LDD, rLDC);
  RarMakeDecodeTables(Table, rNC + rDC + rLDC, RD, rRC);

  for (let i = UnpOldTable.length; i--;) {
    UnpOldTable[i] = Table[i];
  }
  return true;
}

function RarDecodeNumber(bstream, dec) {
  const DecodeLen = dec.DecodeLen;
  const DecodePos = dec.DecodePos;
  const DecodeNum = dec.DecodeNum;
  const bitField = bstream.getBits() & 0xfffe;
  // some sort of rolled out binary search
  const bits = ((bitField < DecodeLen[8]) ?
      ((bitField < DecodeLen[4]) ?
          ((bitField < DecodeLen[2]) ?
              ((bitField < DecodeLen[1]) ? 1 : 2) :
              ((bitField < DecodeLen[3]) ? 3 : 4)) :
          (bitField < DecodeLen[6]) ?
              ((bitField < DecodeLen[5]) ? 5 : 6) :
              ((bitField < DecodeLen[7]) ? 7 : 8)) :
      ((bitField < DecodeLen[12]) ?
          ((bitField < DecodeLen[10]) ?
              ((bitField < DecodeLen[9]) ? 9 : 10) :
              ((bitField < DecodeLen[11]) ? 11 : 12)) :
          (bitField < DecodeLen[14]) ?
              ((bitField < DecodeLen[13]) ? 13 : 14) :
              15));
  bstream.readBits(bits);
  const N = DecodePos[bits] + ((bitField - DecodeLen[bits - 1]) >>> (16 - bits));
  return DecodeNum[N];
}

function RarMakeDecodeTables(BitLength, offset, dec, size) {
  const DecodeLen = dec.DecodeLen;
  const DecodePos = dec.DecodePos;
  const DecodeNum = dec.DecodeNum;
  const LenCount = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
  const TmpPos = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
  let N = 0;
  let M = 0;

  for (let i = DecodeNum.length; i--;) {
    DecodeNum[i] = 0;
  }

  for (let i = 0; i < size; i++) {
    LenCount[BitLength[i + offset] & 0xF]++;
  }

  LenCount[0] = 0;
  TmpPos[0] = 0;
  DecodePos[0] = 0;
  DecodeLen[0] = 0;

  for (let I = 1; I < 16; ++I) {
    N = 2 * (N + LenCount[I]);
    M = (N << (15 - I));
    if (M > 0xFFFF) {
      M = 0xFFFF;
    }
    DecodeLen[I] = M;
    DecodePos[I] = DecodePos[I - 1] + LenCount[I - 1];
    TmpPos[I] = DecodePos[I];
  }

  for (let I = 0; I < size; ++I) {
    if (BitLength[I + offset] != 0) {
      DecodeNum[TmpPos[BitLength[offset + I] & 0xF]++] = I;
    }
  }
}
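// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original unrar.js): how the
// {DecodeLen, DecodePos, DecodeNum} tables built by RarMakeDecodeTables() are
// consumed by RarDecodeNumber(). The toy alphabet, the stubbed stream, and the
// variable names below are assumptions made purely for illustration.
const toyDecodeTable = {
  DecodeLen: new Array(16), DecodePos: new Array(16), DecodeNum: new Array(4),
};
// Canonical prefix-code lengths for 4 symbols: '0', '10', '110', '111'.
RarMakeDecodeTables([1, 2, 3, 3], 0, toyDecodeTable, 4);
// Stub stream whose next 16 bits always look like 10xxxxxxxxxxxxxx (0x8000).
const toyStream = { getBits: () => 0x8000, readBits: () => 0 };
// Decodes to symbol 1, i.e. the symbol whose code is '10'.
const toySymbol = RarDecodeNumber(toyStream, toyDecodeTable);
// ---------------------------------------------------------------------------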
// TODO: implement
/**
 * @param {bitjs.io.BitStream} bstream
 * @param {boolean} Solid
 */
function Unpack15(bstream, Solid) {
  info('ERROR!  RAR 1.5 compression not supported');
}

/**
 * Unpacks the bit stream into rBuffer using the Unpack20 algorithm.
 * @param {bitjs.io.BitStream} bstream
 * @param {boolean} Solid
 */
function Unpack20(bstream, Solid) {
  const destUnpSize = rBuffer.data.length;
  let oldDistPtr = 0;

  if (!Solid) {
    RarReadTables20(bstream);
  }
  while (destUnpSize > rBuffer.ptr) {
    let num = RarDecodeNumber(bstream, LD);
    if (num < 256) {
      rBuffer.insertByte(num);
      continue;
    }
    if (num > 269) {
      let Length = rLDecode[num -= 270] + 3;
      if ((Bits = rLBits[num]) > 0) {
        Length += bstream.readBits(Bits);
      }
      let DistNumber = RarDecodeNumber(bstream, DD);
      let Distance = rDDecode[DistNumber] + 1;
      if ((Bits = rDBits[DistNumber]) > 0) {
        Distance += bstream.readBits(Bits);
      }
      if (Distance >= 0x2000) {
        Length++;
        if (Distance >= 0x40000) {
          Length++;
        }
      }
      lastLength = Length;
      lastDist = rOldDist[oldDistPtr++ & 3] = Distance;
      RarCopyString(Length, Distance);
      continue;
    }
    if (num == 269) {
      RarReadTables20(bstream);
      RarUpdateProgress();
      continue;
    }
    if (num == 256) {
      lastDist = rOldDist[oldDistPtr++ & 3] = lastDist;
      RarCopyString(lastLength, lastDist);
      continue;
    }
    if (num < 261) {
      const Distance = rOldDist[(oldDistPtr - (num - 256)) & 3];
      const LengthNumber = RarDecodeNumber(bstream, RD);
      let Length = rLDecode[LengthNumber] + 2;
      if ((Bits = rLBits[LengthNumber]) > 0) {
        Length += bstream.readBits(Bits);
      }
      if (Distance >= 0x101) {
        Length++;
        if (Distance >= 0x2000) {
          Length++;
          if (Distance >= 0x40000) {
            Length++;
          }
        }
      }
      lastLength = Length;
      lastDist = rOldDist[oldDistPtr++ & 3] = Distance;
      RarCopyString(Length, Distance);
      continue;
    }
    if (num < 270) {
      let Distance = rSDDecode[num -= 261] + 1;
      if ((Bits = rSDBits[num]) > 0) {
        Distance += bstream.readBits(Bits);
      }
      lastLength = 2;
      lastDist = rOldDist[oldDistPtr++ & 3] = Distance;
      RarCopyString(2, Distance);
      continue;
    }
  }
  RarUpdateProgress();
}

function RarUpdateProgress() {
  const change = rBuffer.ptr - currentBytesUnarchivedInFile;
  currentBytesUnarchivedInFile = rBuffer.ptr;
  currentBytesUnarchived += change;
  postProgress();
}

const rNC20 = 298;
const rDC20 = 48;
const rRC20 = 28;
const rBC20 = 19;
const rMC20 = 257;

const UnpOldTable20 = new Array(rMC20 * 4);

// TODO: This function should return a boolean value, see unpack20.cpp.
function RarReadTables20(bstream) {
  const BitLength = new Array(rBC20);
  const Table = new Array(rMC20 * 4);
  let TableSize;
  let N;
  let I;
  const AudioBlock = bstream.readBits(1);
  if (!bstream.readBits(1)) {
    for (let i = UnpOldTable20.length; i--;) {
      UnpOldTable20[i] = 0;
    }
  }
  TableSize = rNC20 + rDC20 + rRC20;
  for (I = 0; I < rBC20; I++) {
    BitLength[I] = bstream.readBits(4);
  }
  RarMakeDecodeTables(BitLength, 0, BD, rBC20);
  I = 0;
  while (I < TableSize) {
    const num = RarDecodeNumber(bstream, BD);
    if (num < 16) {
      Table[I] = num + UnpOldTable20[I] & 0xf;
      I++;
    } else if (num == 16) {
      N = bstream.readBits(2) + 3;
      while (N-- > 0 && I < TableSize) {
        Table[I] = Table[I - 1];
        I++;
      }
    } else {
      if (num == 17) {
        N = bstream.readBits(3) + 3;
      } else {
        N = bstream.readBits(7) + 11;
      }
      while (N-- > 0 && I < TableSize) {
        Table[I++] = 0;
      }
    }
  }
  RarMakeDecodeTables(Table, 0, LD, rNC20);
  RarMakeDecodeTables(Table, rNC20, DD, rDC20);
  RarMakeDecodeTables(Table, rNC20 + rDC20, RD, rRC20);
  for (let i = UnpOldTable20.length; i--;) {
    UnpOldTable20[i] = Table[i];
  }
}

let lowDistRepCount = 0;
let prevLowDist = 0;

let rOldDist = [0, 0, 0, 0];
let lastDist;
let lastLength;

// ============================================================================================== //

// Unpack code specific to RarVM
const VM = new RarVM();

/**
 * Filters code, one entry per filter.
 * @type {Array<UnpackFilter>}
 */
let Filters = [];

/**
 * Filters stack, several entrances of same filter are possible.
 * @type {Array<UnpackFilter>}
 */
let PrgStack = [];

/**
 * Lengths of preceding blocks, one length per filter. Used to reduce
 * size required to write block length if lengths are repeating.
 * @type {Array<number>}
 */
let OldFilterLengths = [];

let LastFilter = 0;

function InitFilters() {
  OldFilterLengths = [];
  LastFilter = 0;
  Filters = [];
  PrgStack = [];
}
/**
 * @param {number} firstByte The first byte (flags).
 * @param {Uint8Array} vmCode An array of bytes.
 */
function RarAddVMCode(firstByte, vmCode) {
  VM.init();
  const bstream = new bitjs.io.BitStream(vmCode.buffer, true /* rtl */);

  let filtPos;
  if (firstByte & 0x80) {
    filtPos = RarVM.readData(bstream);
    if (filtPos == 0) {
      InitFilters();
    } else {
      filtPos--;
    }
  } else {
    filtPos = LastFilter;
  }

  if (filtPos > Filters.length || filtPos > OldFilterLengths.length) {
    return false;
  }

  LastFilter = filtPos;
  const newFilter = (filtPos == Filters.length);

  // new filter for PrgStack
  const stackFilter = new UnpackFilter();
  let filter = null;
  // new filter code, never used before since VM reset
  if (newFilter) {
    // too many different filters, corrupt archive
    if (filtPos > 1024) {
      return false;
    }

    filter = new UnpackFilter();
    Filters.push(filter);
    stackFilter.ParentFilter = (Filters.length - 1);
    OldFilterLengths.push(0); // OldFilterLengths.Add(1)
    filter.ExecCount = 0;
  } else {
    // filter was used in the past
    filter = Filters[filtPos];
    stackFilter.ParentFilter = filtPos;
    filter.ExecCount++;
  }

  let emptyCount = 0;
  for (let i = 0; i < PrgStack.length; ++i) {
    PrgStack[i - emptyCount] = PrgStack[i];

    if (PrgStack[i] == null) {
      emptyCount++;
    }
    if (emptyCount > 0) {
      PrgStack[i] = null;
    }
  }

  if (emptyCount == 0) {
    PrgStack.push(null); //PrgStack.Add(1);
    emptyCount = 1;
  }

  const stackPos = PrgStack.length - emptyCount;
  PrgStack[stackPos] = stackFilter;
  stackFilter.ExecCount = filter.ExecCount;

  let blockStart = RarVM.readData(bstream);
  if (firstByte & 0x40) {
    blockStart += 258;
  }
  stackFilter.BlockStart = (blockStart + rBuffer.ptr) & MAXWINMASK;

  if (firstByte & 0x20) {
    stackFilter.BlockLength = RarVM.readData(bstream);
  } else {
    stackFilter.BlockLength = filtPos < OldFilterLengths.length
        ? OldFilterLengths[filtPos]
        : 0;
  }
  stackFilter.NextWindow = (wBuffer.ptr != rBuffer.ptr) &&
      (((wBuffer.ptr - rBuffer.ptr) & MAXWINMASK) <= blockStart);

  OldFilterLengths[filtPos] = stackFilter.BlockLength;

  for (let i = 0; i < 7; ++i) {
    stackFilter.Prg.InitR[i] = 0;
  }
  stackFilter.Prg.InitR[3] = VM_GLOBALMEMADDR;
  stackFilter.Prg.InitR[4] = stackFilter.BlockLength;
  stackFilter.Prg.InitR[5] = stackFilter.ExecCount;

  // set registers to optional parameters if any
  if (firstByte & 0x10) {
    const initMask = bstream.readBits(7);
    for (let i = 0; i < 7; ++i) {
      if (initMask & (1 << i)) {
        stackFilter.Prg.InitR[i] = RarVM.readData(bstream);
      }
    }
  }

  if (newFilter) {
    const vmCodeSize = RarVM.readData(bstream);
    if (vmCodeSize >= 0x10000 || vmCodeSize == 0) {
      return false;
    }
    const vmCode = new Uint8Array(vmCodeSize);
    for (let i = 0; i < vmCodeSize; ++i) {
      //if (Inp.Overflow(3))
      //  return(false);
      vmCode[i] = bstream.readBits(8);
    }
    VM.prepare(vmCode, filter.Prg);
  }
  stackFilter.Prg.Cmd = filter.Prg.Cmd;
  stackFilter.Prg.AltCmd = filter.Prg.Cmd;

  const staticDataSize = filter.Prg.StaticData.length;
  if (staticDataSize > 0 && staticDataSize < VM_GLOBALMEMSIZE) {
    // read statically defined data contained in DB commands
    for (let i = 0; i < staticDataSize; ++i) {
      stackFilter.Prg.StaticData[i] = filter.Prg.StaticData[i];
    }
  }

  if (stackFilter.Prg.GlobalData.length < VM_FIXEDGLOBALSIZE) {
    stackFilter.Prg.GlobalData = new Uint8Array(VM_FIXEDGLOBALSIZE);
  }

  // Declared with let (not const) because it is reassigned below when the
  // global data buffer has to be resized.
  let globalData = stackFilter.Prg.GlobalData;
  for (let i = 0; i < 7; ++i) {
    VM.setLowEndianValue(globalData, stackFilter.Prg.InitR[i], i * 4);
  }

  VM.setLowEndianValue(globalData, stackFilter.BlockLength, 0x1c);
  VM.setLowEndianValue(globalData, 0, 0x20);
  VM.setLowEndianValue(globalData, stackFilter.ExecCount, 0x2c);
  for (let i = 0; i < 16; ++i) {
    globalData[0x30 + i] = 0;
  }

  // put data block passed as parameter if any
  if (firstByte & 8) {
    //if (Inp.Overflow(3))
    //  return(false);
    const dataSize = RarVM.readData(bstream);
    if (dataSize > (VM_GLOBALMEMSIZE - VM_FIXEDGLOBALSIZE)) {
      return false;
    }

    const curSize = stackFilter.Prg.GlobalData.length;
    if (curSize < dataSize + VM_FIXEDGLOBALSIZE) {
      // Resize global data and update the stackFilter and local variable.
      const numBytesToAdd = dataSize + VM_FIXEDGLOBALSIZE - curSize;
      const newGlobalData = new Uint8Array(globalData.length + numBytesToAdd);
      newGlobalData.set(globalData);

      stackFilter.Prg.GlobalData = newGlobalData;
      globalData = newGlobalData;
    }
    //byte *GlobalData=&StackFilter->Prg.GlobalData[VM_FIXEDGLOBALSIZE];
    for (let i = 0; i < dataSize; ++i) {
      //if (Inp.Overflow(3))
      //  return(false);
      globalData[VM_FIXEDGLOBALSIZE + i] = bstream.readBits(8);
    }
  }

  return true;
}
/**
 * @param {!bitjs.io.BitStream} bstream
 */
function RarReadVMCode(bstream) {
  const firstByte = bstream.readBits(8);
  let length = (firstByte & 7) + 1;
  if (length == 7) {
    length = bstream.readBits(8) + 7;
  } else if (length == 8) {
    length = bstream.readBits(16);
  }

  // Read all bytes of VM code into an array.
  const vmCode = new Uint8Array(length);
  for (let i = 0; i < length; i++) {
    // Do something here with checking readbuf.
    vmCode[i] = bstream.readBits(8);
  }
  return RarAddVMCode(firstByte, vmCode);
}

/**
 * Unpacks the bit stream into rBuffer using the Unpack29 algorithm.
 * @param {bitjs.io.BitStream} bstream
 * @param {boolean} Solid
 */
function Unpack29(bstream, Solid) {
  // lazy initialize rDDecode and rDBits
  const DDecode = new Array(rDC);
  const DBits = new Array(rDC);
  let Dist = 0;
  let BitLength = 0;
  let Slot = 0;

  for (let I = 0; I < rDBitLengthCounts.length; I++, BitLength++) {
    for (let J = 0; J < rDBitLengthCounts[I]; J++, Slot++, Dist += (1 << BitLength)) {
      DDecode[Slot] = Dist;
      DBits[Slot] = BitLength;
    }
  }

  let Bits;
  //tablesRead = false;

  rOldDist = [0, 0, 0, 0];

  lastDist = 0;
  lastLength = 0;

  for (let i = UnpOldTable.length; i--;) {
    UnpOldTable[i] = 0;
  }

  // read in Huffman tables
  RarReadTables(bstream);

  while (true) {
    let num = RarDecodeNumber(bstream, LD);

    if (num < 256) {
      rBuffer.insertByte(num);
      continue;
    }
    if (num >= 271) {
      let Length = rLDecode[num -= 271] + 3;
      if ((Bits = rLBits[num]) > 0) {
        Length += bstream.readBits(Bits);
      }
      const DistNumber = RarDecodeNumber(bstream, DD);
      let Distance = DDecode[DistNumber] + 1;
      if ((Bits = DBits[DistNumber]) > 0) {
        if (DistNumber > 9) {
          if (Bits > 4) {
            Distance += ((bstream.getBits() >>> (20 - Bits)) << 4);
            bstream.readBits(Bits - 4);
            //todo: check this
          }
          if (lowDistRepCount > 0) {
            lowDistRepCount--;
            Distance += prevLowDist;
          } else {
            const LowDist = RarDecodeNumber(bstream, LDD);
            if (LowDist == 16) {
              lowDistRepCount = rLOW_DIST_REP_COUNT - 1;
              Distance += prevLowDist;
            } else {
              Distance += LowDist;
              prevLowDist = LowDist;
            }
          }
        } else {
          Distance += bstream.readBits(Bits);
        }
      }
      if (Distance >= 0x2000) {
        Length++;
        if (Distance >= 0x40000) {
          Length++;
        }
      }
      RarInsertOldDist(Distance);
      RarInsertLastMatch(Length, Distance);
      RarCopyString(Length, Distance);
      continue;
    }
    if (num == 256) {
      if (!RarReadEndOfBlock(bstream)) {
        break;
      }
      continue;
    }
    if (num == 257) {
      if (!RarReadVMCode(bstream)) {
        break;
      }
      continue;
    }
    if (num == 258) {
      if (lastLength != 0) {
        RarCopyString(lastLength, lastDist);
      }
      continue;
    }
    if (num < 263) {
      const DistNum = num - 259;
      const Distance = rOldDist[DistNum];

      for (let I = DistNum; I > 0; I--) {
        rOldDist[I] = rOldDist[I - 1];
      }
      rOldDist[0] = Distance;

      const LengthNumber = RarDecodeNumber(bstream, RD);
      let Length = rLDecode[LengthNumber] + 2;
      if ((Bits = rLBits[LengthNumber]) > 0) {
        Length += bstream.readBits(Bits);
      }
      RarInsertLastMatch(Length, Distance);
      RarCopyString(Length, Distance);
      continue;
    }
    if (num < 272) {
      let Distance = rSDDecode[num -= 263] + 1;
      if ((Bits = rSDBits[num]) > 0) {
        Distance += bstream.readBits(Bits);
      }
      RarInsertOldDist(Distance);
      RarInsertLastMatch(2, Distance);
      RarCopyString(2, Distance);
      continue;
    }
  } // while (true)
  RarUpdateProgress();
  RarWriteBuf();
}
/**
 * Does stuff to the current byte buffer (rBuffer) based on
 * the filters loaded into the RarVM and writes out to wBuffer.
 */
function RarWriteBuf() {
  let writeSize = (rBuffer.ptr & MAXWINMASK);

  for (let i = 0; i < PrgStack.length; ++i) {
    const flt = PrgStack[i];
    if (flt == null) {
      continue;
    }

    if (flt.NextWindow) {
      flt.NextWindow = false;
      continue;
    }

    const blockStart = flt.BlockStart;
    const blockLength = flt.BlockLength;

    // WrittenBorder = wBuffer.ptr
    if (((blockStart - wBuffer.ptr) & MAXWINMASK) < writeSize) {
      if (wBuffer.ptr != blockStart) {
        // Copy blockStart bytes from rBuffer into wBuffer.
        RarWriteArea(wBuffer.ptr, blockStart);
        writeSize = (rBuffer.ptr - wBuffer.ptr) & MAXWINMASK;
      }
      if (blockLength <= writeSize) {
        const blockEnd = (blockStart + blockLength) & MAXWINMASK;
        if (blockStart < blockEnd || blockEnd == 0) {
          VM.setMemory(0, rBuffer.data.subarray(blockStart, blockStart + blockLength), blockLength);
        } else {
          const firstPartLength = MAXWINSIZE - blockStart;
          VM.setMemory(0, rBuffer.data.subarray(blockStart, blockStart + firstPartLength), firstPartLength);
          VM.setMemory(firstPartLength, rBuffer.data, blockEnd);
        }

        const parentPrg = Filters[flt.ParentFilter].Prg;
        const prg = flt.Prg;

        if (parentPrg.GlobalData.length > VM_FIXEDGLOBALSIZE) {
          // Copy global data from previous script execution if any.
          prg.GlobalData = new Uint8Array(parentPrg.GlobalData);
        }

        RarExecuteCode(prg);

        if (prg.GlobalData.length > VM_FIXEDGLOBALSIZE) {
          // Save global data for next script execution.
          const globalDataLen = prg.GlobalData.length;
          if (parentPrg.GlobalData.length < globalDataLen) {
            parentPrg.GlobalData = new Uint8Array(globalDataLen);
          }
          parentPrg.GlobalData.set(
              this.mem_.subarray(VM_FIXEDGLOBALSIZE, VM_FIXEDGLOBALSIZE + globalDataLen),
              VM_FIXEDGLOBALSIZE);
        } else {
          parentPrg.GlobalData = new Uint8Array(0);
        }

        let filteredData = prg.FilteredData;

        PrgStack[i] = null;
        while (i + 1 < PrgStack.length) {
          const nextFilter = PrgStack[i + 1];
          if (nextFilter == null || nextFilter.BlockStart != blockStart ||
              nextFilter.BlockLength != filteredData.length || nextFilter.NextWindow) {
            break;
          }

          // Apply several filters to same data block.
          VM.setMemory(0, filteredData, filteredData.length);

          const innerParentPrg = Filters[nextFilter.ParentFilter].Prg;
          const nextPrg = nextFilter.Prg;

          const globalDataLen = innerParentPrg.GlobalData.length;
          if (globalDataLen > VM_FIXEDGLOBALSIZE) {
            // Copy global data from previous script execution if any.
            nextPrg.GlobalData = new Uint8Array(globalDataLen);
            nextPrg.GlobalData.set(
                innerParentPrg.GlobalData.subarray(VM_FIXEDGLOBALSIZE, VM_FIXEDGLOBALSIZE + globalDataLen),
                VM_FIXEDGLOBALSIZE);
          }

          RarExecuteCode(nextPrg);

          if (nextPrg.GlobalData.length > VM_GLOBALMEMSIZE) {
            // Save global data for next script execution.
            const globalDataLen = nextPrg.GlobalData.length;
            if (innerParentPrg.GlobalData.length < globalDataLen) {
              innerParentPrg.GlobalData = new Uint8Array(globalDataLen);
            }
            innerParentPrg.GlobalData.set(
                this.mem_.subarray(VM_FIXEDGLOBALSIZE, VM_FIXEDGLOBALSIZE + globalDataLen),
                VM_FIXEDGLOBALSIZE);
          } else {
            innerParentPrg.GlobalData = new Uint8Array(0);
          }

          filteredData = nextPrg.FilteredData;

          i++;
          PrgStack[i] = null;
        } // while (i + 1 < PrgStack.length)

        for (let j = 0; j < filteredData.length; ++j) {
          wBuffer.insertByte(filteredData[j]);
        }
        writeSize = (rBuffer.ptr - wBuffer.ptr) & MAXWINMASK;
      } // if (blockLength <= writeSize)
      else {
        for (let j = i; j < PrgStack.length; ++j) {
          const theFlt = PrgStack[j];
          if (theFlt != null && theFlt.NextWindow) {
            theFlt.NextWindow = false;
          }
        }
        return;
      }
    } // if (((blockStart - wBuffer.ptr) & MAXWINMASK) < writeSize)
  } // for (let i = 0; i < PrgStack.length; ++i)

  // Write any remaining bytes from rBuffer to wBuffer;
  RarWriteArea(wBuffer.ptr, rBuffer.ptr);

  // Now that the filtered buffer has been written, swap it back to rBuffer.
  rBuffer = wBuffer;
}
/**
 * Copy bytes from rBuffer to wBuffer.
 * @param {number} startPtr The starting point to copy from rBuffer.
 * @param {number} endPtr The ending point to copy from rBuffer.
 */
function RarWriteArea(startPtr, endPtr) {
  if (endPtr < startPtr) {
    console.error('endPtr < startPtr, endPtr=' + endPtr + ', startPtr=' + startPtr);
    //    RarWriteData(startPtr, -(int)StartPtr & MAXWINMASK);
    //    RarWriteData(0, endPtr);
    return;
  } else if (startPtr < endPtr) {
    RarWriteData(startPtr, endPtr - startPtr);
  }
}

/**
 * Writes bytes into wBuffer from rBuffer.
 * @param {number} offset The starting point to copy bytes from rBuffer.
 * @param {number} numBytes The number of bytes to copy.
 */
function RarWriteData(offset, numBytes) {
  if (wBuffer.ptr >= rBuffer.data.length) {
    return;
  }
  const leftToWrite = rBuffer.data.length - wBuffer.ptr;
  if (numBytes > leftToWrite) {
    numBytes = leftToWrite;
  }
  for (let i = 0; i < numBytes; ++i) {
    wBuffer.insertByte(rBuffer.data[offset + i]);
  }
}

/**
 * @param {VM_PreparedProgram} prg
 */
function RarExecuteCode(prg) {
  if (prg.GlobalData.length > 0) {
    const writtenFileSize = wBuffer.ptr;
    prg.InitR[6] = writtenFileSize;
    VM.setLowEndianValue(prg.GlobalData, writtenFileSize, 0x24);
    VM.setLowEndianValue(prg.GlobalData, (writtenFileSize >>> 32) >> 0, 0x28);
    VM.execute(prg);
  }
}

function RarReadEndOfBlock(bstream) {
  RarUpdateProgress();

  let NewTable = false;
  let NewFile = false;
  if (bstream.readBits(1)) {
    NewTable = true;
  } else {
    NewFile = true;
    NewTable = !!bstream.readBits(1);
  }
  //tablesRead = !NewTable;
  return !(NewFile || NewTable && !RarReadTables(bstream));
}

function RarInsertLastMatch(length, distance) {
  lastDist = distance;
  lastLength = length;
}

function RarInsertOldDist(distance) {
  rOldDist.splice(3, 1);
  rOldDist.splice(0, 0, distance);
}

/**
 * Copies len bytes from distance bytes ago in the buffer to the end of the
 * current byte buffer.
 * @param {number} len How many bytes to copy.
 * @param {number} distance How far back in the buffer from the current write
 *     pointer to start copying from.
 */
function RarCopyString(len, distance) {
  let srcPtr = rBuffer.ptr - distance;
  // If we need to go back to previous buffers, then seek back.
  if (srcPtr < 0) {
    let l = rOldBuffers.length;
    while (srcPtr < 0) {
      srcPtr = rOldBuffers[--l].data.length + srcPtr;
    }
    // TODO: lets hope that it never needs to read across buffer boundaries
    while (len--) {
      rBuffer.insertByte(rOldBuffers[l].data[srcPtr++]);
    }
  }
  if (len > distance) {
    while (len--) {
      rBuffer.insertByte(rBuffer.data[srcPtr++]);
    }
  } else {
    rBuffer.insertBytes(rBuffer.data.subarray(srcPtr, srcPtr + len));
  }
}
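// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original unrar.js): the `len > distance`
// branch of RarCopyString() performs an LZ77-style overlapping copy, where
// bytes written during the copy are immediately available as sources. A
// self-contained illustration on a plain array; names are assumptions chosen
// for illustration only.
function demoOverlappingCopy() {
  const out = [0x61, 0x62];   // "ab" already decoded
  let src = out.length - 2;   // distance = 2
  let len = 5;                // copy 5 bytes starting 2 bytes back
  while (len--) {
    out.push(out[src++]);     // appends "ababa"
  }
  return out; // [0x61, 0x62, 0x61, 0x62, 0x61, 0x62, 0x61]
}
// ---------------------------------------------------------------------------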
/**
 * @param {RarLocalFile} v
 */
function unpack(v) {
  // TODO: implement what happens when unpVer is < 15
  const Ver = v.header.unpVer <= 15 ? 15 : v.header.unpVer;
  const Solid = v.header.flags.LHD_SOLID;
  const bstream = new bitjs.io.BitStream(v.fileData.buffer, true /* rtl */,
      v.fileData.byteOffset, v.fileData.byteLength);

  rBuffer = new bitjs.io.ByteBuffer(v.header.unpackedSize);

  if (logToConsole) {
    info('Unpacking ' + v.filename + ' RAR v' + Ver);
  }

  switch (Ver) {
    case 15: // rar 1.5 compression
      Unpack15(bstream, Solid);
      break;
    case 20: // rar 2.x compression
    case 26: // files larger than 2GB
      Unpack20(bstream, Solid);
      break;
    case 29: // rar 3.x compression
    case 36: // alternative hash
      wBuffer = new bitjs.io.ByteBuffer(rBuffer.data.length);
      Unpack29(bstream, Solid);
      break;
  } // switch(method)

  rOldBuffers.push(rBuffer);
  // TODO: clear these old buffers when there's over 4MB of history
  return rBuffer.data;
}

/**
 */
class RarLocalFile {
  /**
   * @param {bitjs.io.ByteStream} bstream
   */
  constructor(bstream) {
    this.header = new RarVolumeHeader(bstream);
    this.filename = this.header.filename;

    if (this.header.headType != FILE_HEAD && this.header.headType != ENDARC_HEAD) {
      this.isValid = false;
      info('Error! RAR Volume did not include a FILE_HEAD header ');
    } else {
      // read in the compressed data
      this.fileData = null;
      if (this.header.packSize > 0) {
        this.fileData = bstream.readBytes(this.header.packSize);
        this.isValid = true;
      }
    }
  }

  unrar() {
    if (!this.header.flags.LHD_SPLIT_BEFORE) {
      // unstore file
      if (this.header.method == 0x30) {
        if (logToConsole) {
          info('Unstore ' + this.filename);
        }
        this.isValid = true;

        currentBytesUnarchivedInFile += this.fileData.length;
        currentBytesUnarchived += this.fileData.length;

        // Create a new buffer and copy it over.
        const len = this.header.packSize;
        const newBuffer = new bitjs.io.ByteBuffer(len);
        newBuffer.insertBytes(this.fileData);
        this.fileData = newBuffer.data;
      } else {
        this.isValid = true;
        this.fileData = unpack(this);
      }
    }
  }
}
// Reads in the volume and main header.
function unrar_start() {
  let bstream = bytestream.tee();
  const header = new RarVolumeHeader(bstream);
  if (header.crc == 0x6152 &&
      header.headType == 0x72 &&
      header.flags.value == 0x1A21 &&
      header.headSize == 7) {
    if (logToConsole) {
      info('Found RAR signature');
    }

    const mhead = new RarVolumeHeader(bstream);
    if (mhead.headType != MAIN_HEAD) {
      info('Error! RAR did not include a MAIN_HEAD header');
    } else {
      bytestream = bstream.tee();
    }
  }
}

function unrar() {
  let bstream = bytestream.tee();

  let localFile = null;
  do {
    localFile = new RarLocalFile(bstream);
    if (logToConsole) {
      info('RAR localFile isValid=' + localFile.isValid + ', volume packSize=' + localFile.header.packSize);
      localFile.header.dump();
    }

    if (localFile && localFile.isValid && localFile.header.packSize > 0) {
      bytestream = bstream.tee();

      totalUncompressedBytesInArchive += localFile.header.unpackedSize;
      allLocalFiles.push(localFile);

      currentFilename = localFile.header.filename;
      currentBytesUnarchivedInFile = 0;

      localFile.unrar();

      if (localFile.isValid) {
        postMessage(new bitjs.archive.UnarchiveExtractEvent(localFile));
        postProgress();
      }
    } else if (localFile.header.packSize == 0 && localFile.header.unpackedSize == 0) {
      // Skip this file.
      localFile.isValid = true;
    }
  } while (localFile.isValid && bstream.getNumBytesLeft() > 0);

  totalFilesInArchive = allLocalFiles.length;

  postProgress();

  bytestream = bstream.tee();
}

// event.data.file has the first ArrayBuffer.
// event.data.bytes has all subsequent ArrayBuffers.
onmessage = function (event) {
  const bytes = event.data.file || event.data.bytes;
  logToConsole = !!event.data.logToConsole;

  // This is the very first time we have been called. Initialize the bytestream.
  if (!bytestream) {
    bytestream = new bitjs.io.ByteStream(bytes);

    currentFilename = '';
    currentFileNumber = 0;
    currentBytesUnarchivedInFile = 0;
    currentBytesUnarchived = 0;
    totalUncompressedBytesInArchive = 0;
    totalFilesInArchive = 0;
    allLocalFiles = [];
    postMessage(new bitjs.archive.UnarchiveStartEvent());
  } else {
    bytestream.push(bytes);
  }

  if (unarchiveState === UnarchiveState.NOT_STARTED) {
    try {
      unrar_start();
      unarchiveState = UnarchiveState.UNARCHIVING;
    } catch (e) {
      if (typeof e === 'string' && e.startsWith('Error! Overflowed')) {
        if (logToConsole) {
          console.dir(e);
        }
        // Overrun the buffer.
        unarchiveState = UnarchiveState.WAITING;
        postProgress();
      } else {
        console.error('Found an error while unrarring');
        console.dir(e);
        throw e;
      }
    }
  }

  if (unarchiveState === UnarchiveState.UNARCHIVING ||
      unarchiveState === UnarchiveState.WAITING) {
    try {
      unrar();
      unarchiveState = UnarchiveState.FINISHED;
      postMessage(new bitjs.archive.UnarchiveFinishEvent());
    } catch (e) {
      if (typeof e === 'string' && e.startsWith('Error! Overflowed')) {
        if (logToConsole) {
          console.dir(e);
        }
        // Overrun the buffer.
        unarchiveState = UnarchiveState.WAITING;
      } else {
        console.error('Found an error while unrarring');
        console.dir(e);
        throw e;
      }
    }
  }
};
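// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original file): one way a host page might
// drive this Worker, based on the onmessage contract above (the first message
// carries `file`, any follow-up messages carry `bytes`). The worker URL, the
// fetched archive path, and the handling of the received event payloads are
// assumptions; it is left commented out because it is host-side code, not
// worker code.
//
//   const worker = new Worker('/static/js/archive/unrar.js'); // illustrative path
//   worker.onmessage = (e) => {
//     // e.data is one of the bitjs.archive.Unarchive*Event objects posted above.
//     console.log(e.data);
//   };
//   fetch('/path/to/archive.cbr')
//     .then((resp) => resp.arrayBuffer())
//     .then((ab) => worker.postMessage({ file: ab, logToConsole: false }));
// ---------------------------------------------------------------------------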
cps/static/js/untar.js → cps/static/js/archive/untar.js
/**
 * untar.js
 *
 * Licensed under the MIT License
 *
 * Copyright(c) 2011 Google Inc.
 *
 * Reference Documentation:
 *
 * TAR format: http://www.gnu.org/software/automake/manual/tar/Standard.html
 */
// This file expects to be invoked as a Worker (see onmessage below).
-importScripts('bytestream.js');
+importScripts('../io/bytestream.js');
importScripts('archive.js');
const UnarchiveState = {
...
@@ -42,15 +42,6 @@ const info = function(str) {
const err = function (str) {
  postMessage(new bitjs.archive.UnarchiveErrorEvent(str));
};
-// Removes all characters from the first zero-byte in the string onwards.
-var readCleanString = function (bstr, numBytes) {
-  var str = bstr.readString(numBytes);
-  var zIndex = str.indexOf(String.fromCharCode(0));
-  return zIndex != -1 ? str.substr(0, zIndex) : str;
-};
const postProgress = function () {
  postMessage(new bitjs.archive.UnarchiveProgressEvent(
    currentFilename,
...
@@ -63,6 +54,12 @@ const postProgress = function() {
  ));
};
+// Removes all characters from the first zero-byte in the string onwards.
+const readCleanString = function (bstr, numBytes) {
+  const str = bstr.readString(numBytes);
+  const zIndex = str.indexOf(String.fromCharCode(0));
+  return zIndex != -1 ? str.substr(0, zIndex) : str;
+};
class TarLocalFile {
  // takes a ByteStream and parses out the local file information
...
@@ -82,7 +79,7 @@ class TarLocalFile {
  this.typeflag = readCleanString(bstream, 1);
  this.linkname = readCleanString(bstream, 100);
  this.maybeMagic = readCleanString(bstream, 6);
  if (this.maybeMagic == "ustar") {
    this.version = readCleanString(bstream, 2);
    this.uname = readCleanString(bstream, 32);
...
@@ -94,7 +91,7 @@ class TarLocalFile {
    if (this.prefix.length) {
      this.name = this.prefix + this.name;
    }
    bstream.readBytes(12); // 512 - 500
  } else {
    bstream.readBytes(255); // 512 - 257
  }
...
cps/static/js/archive/unzip.js (new file, 0 → 100644)
/**
 * unzip.js
 *
 * Licensed under the MIT License
 *
 * Copyright(c) 2011 Google Inc.
 * Copyright(c) 2011 antimatter15
 *
 * Reference Documentation:
 *
 * ZIP format: http://www.pkware.com/documents/casestudies/APPNOTE.TXT
 * DEFLATE format: http://tools.ietf.org/html/rfc1951
 */

// This file expects to be invoked as a Worker (see onmessage below).
importScripts('../io/bitstream.js');
importScripts('../io/bytebuffer.js');
importScripts('../io/bytestream.js');
importScripts('archive.js');

const UnarchiveState = {
  NOT_STARTED: 0,
  UNARCHIVING: 1,
  WAITING: 2,
  FINISHED: 3,
};

// State - consider putting these into a class.
let unarchiveState = UnarchiveState.NOT_STARTED;
let bytestream = null;
let allLocalFiles = null;
let logToConsole = false;

// Progress variables.
let currentFilename = "";
let currentFileNumber = 0;
let currentBytesUnarchivedInFile = 0;
let currentBytesUnarchived = 0;
let totalUncompressedBytesInArchive = 0;
let totalFilesInArchive = 0;

// Helper functions.
const info = function (str) {
  postMessage(new bitjs.archive.UnarchiveInfoEvent(str));
};
const err = function (str) {
  postMessage(new bitjs.archive.UnarchiveErrorEvent(str));
};
const postProgress = function () {
  postMessage(new bitjs.archive.UnarchiveProgressEvent(
    currentFilename,
    currentFileNumber,
    currentBytesUnarchivedInFile,
    currentBytesUnarchived,
    totalUncompressedBytesInArchive,
    totalFilesInArchive,
    bytestream.getNumBytesRead(),
  ));
};

const zLocalFileHeaderSignature = 0x04034b50;
const zArchiveExtraDataSignature = 0x08064b50;
const zCentralFileHeaderSignature = 0x02014b50;
const zDigitalSignatureSignature = 0x05054b50;
const zEndOfCentralDirSignature = 0x06064b50;
const zEndOfCentralDirLocatorSignature = 0x07064b50;

// mask for getting the Nth bit (zero-based)
const BIT = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000];

class ZipLocalFile {
  // takes a ByteStream and parses out the local file information
  constructor(bstream) {
    if (typeof bstream != typeof {} || !bstream.readNumber || typeof bstream.readNumber != typeof function (){}) {
      return null;
    }

    bstream.readNumber(4); // swallow signature
    this.version = bstream.readNumber(2);
    this.generalPurpose = bstream.readNumber(2);
    this.compressionMethod = bstream.readNumber(2);
    this.lastModFileTime = bstream.readNumber(2);
    this.lastModFileDate = bstream.readNumber(2);
    this.crc32 = bstream.readNumber(4);
    this.compressedSize = bstream.readNumber(4);
    this.uncompressedSize = bstream.readNumber(4);
    this.fileNameLength = bstream.readNumber(2);
    this.extraFieldLength = bstream.readNumber(2);

    this.filename = null;
    if (this.fileNameLength > 0) {
      this.filename = bstream.readString(this.fileNameLength);
    }

    this.extraField = null;
    if (this.extraFieldLength > 0) {
      this.extraField = bstream.readString(this.extraFieldLength);
      //info(" extra field=" + this.extraField);
    }

    // read in the compressed data
    this.fileData = null;
    if (this.compressedSize > 0) {
      this.fileData = new Uint8Array(bstream.readBytes(this.compressedSize));
    }

    // TODO: deal with data descriptor if present (we currently assume no data descriptor!)
    // "This descriptor exists only if bit 3 of the general purpose bit flag is set"
    // But how do you figure out how big the file data is if you don't know the compressedSize
    // from the header?!?
    if ((this.generalPurpose & BIT[3]) != 0) {
      this.crc32 = bstream.readNumber(4);
      this.compressedSize = bstream.readNumber(4);
      this.uncompressedSize = bstream.readNumber(4);
    }

    // Now that we have all the bytes for this file, we can print out some information.
    if (logToConsole) {
      info("Zip Local File Header:");
      info(" version=" + this.version);
      info(" general purpose=" + this.generalPurpose);
      info(" compression method=" + this.compressionMethod);
      info(" last mod file time=" + this.lastModFileTime);
      info(" last mod file date=" + this.lastModFileDate);
      info(" crc32=" + this.crc32);
      info(" compressed size=" + this.compressedSize);
      info(" uncompressed size=" + this.uncompressedSize);
      info(" file name length=" + this.fileNameLength);
      info(" extra field length=" + this.extraFieldLength);
      info(" filename = '" + this.filename + "'");
    }
  }

  // determine what kind of compressed data we have and decompress
  unzip() {
    // Zip Version 1.0, no compression (store only)
    if (this.compressionMethod == 0) {
      if (logToConsole) {
        info("ZIP v" + this.version + ", store only: " + this.filename + " (" + this.compressedSize + " bytes)");
      }
      currentBytesUnarchivedInFile = this.compressedSize;
      currentBytesUnarchived += this.compressedSize;
    }
    // version == 20, compression method == 8 (DEFLATE)
    else if (this.compressionMethod == 8) {
      if (logToConsole) {
        info("ZIP v2.0, DEFLATE: " + this.filename + " (" + this.compressedSize + " bytes)");
      }
      this.fileData = inflate(this.fileData, this.uncompressedSize);
    } else {
      err("UNSUPPORTED VERSION/FORMAT: ZIP v" + this.version + ", compression method=" +
          this.compressionMethod + ": " + this.filename + " (" + this.compressedSize + " bytes)");
      this.fileData = null;
    }
  }
}

// returns a table of Huffman codes
// each entry's index is its code and its value is a JavaScript object
// containing {length: 6, symbol: X}
function getHuffmanCodes(bitLengths) {
  // ensure bitLengths is an array containing at least one element
  if (typeof bitLengths != typeof [] || bitLengths.length < 1) {
    err("Error! getHuffmanCodes() called with an invalid array");
    return null;
  }

  // Reference: http://tools.ietf.org/html/rfc1951#page-8
  const numLengths = bitLengths.length;
  const bl_count = [];
  let MAX_BITS = 1;

  // Step 1: count up how many codes of each length we have
  for (let i = 0; i < numLengths; ++i) {
    const length = bitLengths[i];
    // test to ensure each bit length is a positive, non-zero number
    if (typeof length != typeof 1 || length < 0) {
      err("bitLengths contained an invalid number in getHuffmanCodes(): " + length + " of type " + (typeof length));
      return null;
    }
    // increment the appropriate bitlength count
    if (bl_count[length] == undefined) bl_count[length] = 0;
    // a length of zero means this symbol is not participating in the huffman coding
    if (length > 0) bl_count[length]++;
    if (length > MAX_BITS) MAX_BITS = length;
  }

  // Step 2: Find the numerical value of the smallest code for each code length
  const next_code = [];
  let code = 0;
  for (let bits = 1; bits <= MAX_BITS; ++bits) {
    const length = bits - 1;
    // ensure undefined lengths are zero
    if (bl_count[length] == undefined) bl_count[length] = 0;
    code = (code + bl_count[bits - 1]) << 1;
    next_code[bits] = code;
  }

  // Step 3: Assign numerical values to all codes
  const table = {};
  let tableLength = 0;
  for (let n = 0; n < numLengths; ++n) {
    const len = bitLengths[n];
    if (len != 0) {
      table[next_code[len]] = { length: len, symbol: n }; //, bitstring: binaryValueToString(next_code[len],len) };
      tableLength++;
      next_code[len]++;
    }
  }
  table.maxLength = tableLength;

  return table;
}
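// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original unzip.js): getHuffmanCodes()
// applied to the example alphabet from RFC 1951 section 3.2.2 (code lengths
// 3, 3, 3, 3, 3, 2, 4, 4). Note that the `maxLength` property set above holds
// the number of coded symbols, which decodeSymbol() below uses as a sync
// guard. The variable name is an assumption chosen for illustration.
const rfc1951ExampleCodes = getHuffmanCodes([3, 3, 3, 3, 3, 2, 4, 4]);
// rfc1951ExampleCodes[0b00]  -> {length: 2, symbol: 5}  (code '00'  is symbol F)
// rfc1951ExampleCodes[0b010] -> {length: 3, symbol: 0}  (code '010' is symbol A)
// ---------------------------------------------------------------------------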
/*
  The Huffman codes for the two alphabets are fixed, and are not
  represented explicitly in the data.  The Huffman code lengths
  for the literal/length alphabet are:

  Lit Value    Bits   Codes
  ---------    ----   -----
    0 - 143     8     00110000  through 10111111
  144 - 255     9     110010000 through 111111111
  256 - 279     7     0000000   through 0010111
  280 - 287     8     11000000  through 11000111
*/
// fixed Huffman codes go from 7-9 bits, so we need an array whose index can hold up to 9 bits
let fixedHCtoLiteral = null;
let fixedHCtoDistance = null;

function getFixedLiteralTable() {
  // create once
  if (!fixedHCtoLiteral) {
    const bitlengths = new Array(288);
    for (let i = 0; i <= 143; ++i) bitlengths[i] = 8;
    for (let i = 144; i <= 255; ++i) bitlengths[i] = 9;
    for (let i = 256; i <= 279; ++i) bitlengths[i] = 7;
    for (let i = 280; i <= 287; ++i) bitlengths[i] = 8;
    // get huffman code table
    fixedHCtoLiteral = getHuffmanCodes(bitlengths);
  }
  return fixedHCtoLiteral;
}

function getFixedDistanceTable() {
  // create once
  if (!fixedHCtoDistance) {
    const bitlengths = new Array(32);
    for (let i = 0; i < 32; ++i) {
      bitlengths[i] = 5;
    }
    // get huffman code table
    fixedHCtoDistance = getHuffmanCodes(bitlengths);
  }
  return fixedHCtoDistance;
}

// extract one bit at a time until we find a matching Huffman Code
// then return that symbol
function decodeSymbol(bstream, hcTable) {
  let code = 0;
  let len = 0;
  let match = false;

  // loop until we match
  for (;;) {
    // read in next bit
    const bit = bstream.readBits(1);
    code = (code << 1) | bit;
    ++len;

    // check against Huffman Code table and break if found
    if (hcTable.hasOwnProperty(code) && hcTable[code].length == len) {
      break;
    }
    if (len > hcTable.maxLength) {
      err("Bit stream out of sync, didn't find a Huffman Code, length was " + len +
          " and table only max code length of " + hcTable.maxLength);
      break;
    }
  }
  return hcTable[code].symbol;
}

const CodeLengthCodeOrder = [16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15];

/*
      Extra               Extra               Extra
 Code Bits Length(s) Code Bits Lengths   Code Bits Length(s)
 ---- ---- ------    ---- ---- -------   ---- ---- -------
  257   0     3      267   1   15,16     277   4   67-82
  258   0     4      268   1   17,18     278   4   83-98
  259   0     5      269   2   19-22     279   4   99-114
  260   0     6      270   2   23-26     280   4  115-130
  261   0     7      271   2   27-30     281   5  131-162
  262   0     8      272   2   31-34     282   5  163-194
  263   0     9      273   3   35-42     283   5  195-226
  264   0    10      274   3   43-50     284   5  227-257
  265   1  11,12     275   3   51-58     285   0    258
  266   1  13,14     276   3   59-66
*/
const LengthLookupTable = [
  [0, 3], [0, 4], [0, 5], [0, 6],
  [0, 7], [0, 8], [0, 9], [0, 10],
  [1, 11], [1, 13], [1, 15], [1, 17],
  [2, 19], [2, 23], [2, 27], [2, 31],
  [3, 35], [3, 43], [3, 51], [3, 59],
  [4, 67], [4, 83], [4, 99], [4, 115],
  [5, 131], [5, 163], [5, 195], [5, 227],
  [0, 258],
];

/*
       Extra           Extra                Extra
  Code Bits Dist  Code Bits   Dist     Code Bits Distance
  ---- ---- ----  ---- ----  ------    ---- ---- --------
    0   0    1     10   4     33-48    20    9   1025-1536
    1   0    2     11   4     49-64    21    9   1537-2048
    2   0    3     12   5     65-96    22   10   2049-3072
    3   0    4     13   5     97-128   23   10   3073-4096
    4   1   5,6    14   6    129-192   24   11   4097-6144
    5   1   7,8    15   6    193-256   25   11   6145-8192
    6   2   9-12   16   7    257-384   26   12  8193-12288
    7   2  13-16   17   7    385-512   27   12 12289-16384
    8   3  17-24   18   8    513-768   28   13 16385-24576
    9   3  25-32   19   8   769-1024   29   13 24577-32768
*/
const DistLookupTable = [
  [0, 1], [0, 2], [0, 3], [0, 4],
  [1, 5], [1, 7],
  [2, 9], [2, 13],
  [3, 17], [3, 25],
  [4, 33], [4, 49],
  [5, 65], [5, 97],
  [6, 129], [6, 193],
  [7, 257], [7, 385],
  [8, 513], [8, 769],
  [9, 1025], [9, 1537],
  [10, 2049], [10, 3073],
  [11, 4097], [11, 6145],
  [12, 8193], [12, 12289],
  [13, 16385], [13, 24577],
];

function inflateBlockData(bstream, hcLiteralTable, hcDistanceTable, buffer) {
  /*
    loop (until end of block code recognized)
      decode literal/length value from input stream
      if value < 256
        copy value (literal byte) to output stream
      otherwise
        if value = end of block (256)
          break from loop
        otherwise (value = 257..285)
          decode distance from input stream
          move backwards distance bytes in the output
          stream, and copy length bytes from this
          position to the output stream.
  */
  let numSymbols = 0;
  let blockSize = 0;
  for (;;) {
    const symbol = decodeSymbol(bstream, hcLiteralTable);
    ++numSymbols;
    if (symbol < 256) {
      // copy literal byte to output
      buffer.insertByte(symbol);
      blockSize++;
    } else {
      // end of block reached
      if (symbol == 256) {
        break;
      } else {
        const lengthLookup = LengthLookupTable[symbol - 257];
        let length = lengthLookup[1] + bstream.readBits(lengthLookup[0]);
        const distLookup = DistLookupTable[decodeSymbol(bstream, hcDistanceTable)];
        let distance = distLookup[1] + bstream.readBits(distLookup[0]);

        // now apply length and distance appropriately and copy to output
        // TODO: check that backward distance < data.length?

        // http://tools.ietf.org/html/rfc1951#page-11
        // "Note also that the referenced string may overlap the current
        //  position; for example, if the last 2 bytes decoded have values
        //  X and Y, a string reference with <length = 5, distance = 2>
        //  adds X,Y,X,Y,X to the output stream."
        //
        // loop for each character
        let ch = buffer.ptr - distance;
        blockSize += length;
        if (length > distance) {
          const data = buffer.data;
          while (length--) {
            buffer.insertByte(data[ch++]);
          }
        } else {
          buffer.insertBytes(buffer.data.subarray(ch, ch + length));
        }
      } // length-distance pair
    } // length-distance pair or end-of-block
  } // loop until we reach end of block
  return blockSize;
}

// {Uint8Array} compressedData A Uint8Array of the compressed file data.
// compression method 8
// deflate: http://tools.ietf.org/html/rfc1951
function inflate(compressedData, numDecompressedBytes) {
  // Bit stream representing the compressed data.
  const bstream = new bitjs.io.BitStream(compressedData.buffer,
      false /* rtl */,
      compressedData.byteOffset,
      compressedData.byteLength);
  const buffer = new bitjs.io.ByteBuffer(numDecompressedBytes);
  let blockSize = 0;

  // block format: http://tools.ietf.org/html/rfc1951#page-9
  let bFinal = 0;
  do {
    bFinal = bstream.readBits(1);
    let bType = bstream.readBits(2);
    blockSize = 0;
    // no compression
    if (bType == 0) {
      // skip remaining bits in this byte
      while (bstream.bitPtr != 0) bstream.readBits(1);
      const len = bstream.readBits(16);
      const nlen = bstream.readBits(16);
      // TODO: check if nlen is the ones-complement of len?

      if (len > 0) buffer.insertBytes(bstream.readBytes(len));
      blockSize = len;
    }
    // fixed Huffman codes
    else if (bType == 1) {
      blockSize = inflateBlockData(bstream, getFixedLiteralTable(), getFixedDistanceTable(), buffer);
    }
    // dynamic Huffman codes
    else if (bType == 2) {
      const numLiteralLengthCodes = bstream.readBits(5) + 257;
      const numDistanceCodes = bstream.readBits(5) + 1;
      const numCodeLengthCodes = bstream.readBits(4) + 4;

      // populate the array of code length codes (first de-compaction)
      const codeLengthsCodeLengths = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
      for (let i = 0; i < numCodeLengthCodes; ++i) {
        codeLengthsCodeLengths[CodeLengthCodeOrder[i]] = bstream.readBits(3);
      }

      // get the Huffman Codes for the code lengths
      const codeLengthsCodes = getHuffmanCodes(codeLengthsCodeLengths);

      // now follow this mapping
      /*
        0 - 15: Represent code lengths of 0 - 15
            16: Copy the previous code length 3 - 6 times.
                The next 2 bits indicate repeat length
                (0 = 3, ... , 3 = 6)
                Example:  Codes 8, 16 (+2 bits 11),
                          16 (+2 bits 10) will expand to
                          12 code lengths of 8 (1 + 6 + 5)
            17: Repeat a code length of 0 for 3 - 10 times.
                (3 bits of length)
            18: Repeat a code length of 0 for 11 - 138 times
                (7 bits of length)
      */
      // to generate the true code lengths of the Huffman Codes for the literal
      // and distance tables together
      const literalCodeLengths = [];
      let prevCodeLength = 0;
      while (literalCodeLengths.length < numLiteralLengthCodes + numDistanceCodes) {
        const symbol = decodeSymbol(bstream, codeLengthsCodes);
        if (symbol <= 15) {
          literalCodeLengths.push(symbol);
          prevCodeLength = symbol;
        } else if (symbol == 16) {
          let repeat = bstream.readBits(2) + 3;
          while (repeat--) {
            literalCodeLengths.push(prevCodeLength);
          }
        } else if (symbol == 17) {
          let repeat = bstream.readBits(3) + 3;
          while (repeat--) {
            literalCodeLengths.push(0);
          }
        } else if (symbol == 18) {
          let repeat = bstream.readBits(7) + 11;
          while (repeat--) {
            literalCodeLengths.push(0);
          }
        }
      }

      // now split the distance code lengths out of the literal code array
      const distanceCodeLengths = literalCodeLengths.splice(numLiteralLengthCodes, numDistanceCodes);

      // now generate the true Huffman Code tables using these code lengths
      const hcLiteralTable = getHuffmanCodes(literalCodeLengths);
      const hcDistanceTable = getHuffmanCodes(distanceCodeLengths);
      blockSize = inflateBlockData(bstream, hcLiteralTable, hcDistanceTable, buffer);
    } else {
      // error
      err("Error! Encountered deflate block of type 3");
      return null;
    }

    // update progress
    currentBytesUnarchivedInFile += blockSize;
    currentBytesUnarchived += blockSize;
    postProgress();
  } while (bFinal != 1);
  // we are done reading blocks if the bFinal bit was set for this block

  // return the buffer data bytes
  return buffer.data;
}
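// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original unzip.js): inflate() on a single
// stored (BTYPE=00) DEFLATE block. Byte 0 packs BFINAL=1 and BTYPE=00 in its
// low bits, then LEN=3 and NLEN=~LEN follow after byte alignment, then the
// three literal bytes 'a', 'b', 'c'. Left commented out because inflate()
// reports progress via postProgress(), which needs the worker state (the
// bytestream) to be initialized first.
//
//   const stored = new Uint8Array([0x01, 0x03, 0x00, 0xFC, 0xFF, 0x61, 0x62, 0x63]);
//   inflate(stored, 3); // => Uint8Array [0x61, 0x62, 0x63]
// ---------------------------------------------------------------------------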
function unzip() {
  let bstream = bytestream.tee();

  // loop until we don't see any more local files
  while (bstream.peekNumber(4) == zLocalFileHeaderSignature) {
    const oneLocalFile = new ZipLocalFile(bstream);
    // this should strip out directories/folders
    if (oneLocalFile && oneLocalFile.uncompressedSize > 0 && oneLocalFile.fileData) {
      // If we make it to this point and haven't thrown an error, we have successfully
      // read in the data for a local file, so we can update the actual bytestream.
      bytestream = bstream.tee();

      allLocalFiles.push(oneLocalFile);
      totalUncompressedBytesInArchive += oneLocalFile.uncompressedSize;

      // update progress
      currentFilename = oneLocalFile.filename;
      currentFileNumber = allLocalFiles.length - 1;
      currentBytesUnarchivedInFile = 0;

      // Actually do the unzipping.
      oneLocalFile.unzip();

      if (oneLocalFile.fileData != null) {
        postMessage(new bitjs.archive.UnarchiveExtractEvent(oneLocalFile));
        postProgress();
      }
    }
  }
  totalFilesInArchive = allLocalFiles.length;

  // archive extra data record
  if (bstream.peekNumber(4) == zArchiveExtraDataSignature) {
    if (logToConsole) {
      info(" Found an Archive Extra Data Signature");
    }
    // skipping this record for now
    bstream.readNumber(4);
    const archiveExtraFieldLength = bstream.readNumber(4);
    bstream.readString(archiveExtraFieldLength);
  }

  // central directory structure
  // TODO: handle the rest of the structures (Zip64 stuff)
  if (bytestream.peekNumber(4) == zCentralFileHeaderSignature) {
    if (logToConsole) {
      info(" Found a Central File Header");
    }
    // read all file headers
    while (bstream.peekNumber(4) == zCentralFileHeaderSignature) {
      bstream.readNumber(4); // signature
      bstream.readNumber(2); // version made by
      bstream.readNumber(2); // version needed to extract
      bstream.readNumber(2); // general purpose bit flag
      bstream.readNumber(2); // compression method
      bstream.readNumber(2); // last mod file time
      bstream.readNumber(2); // last mod file date
      bstream.readNumber(4); // crc32
      bstream.readNumber(4); // compressed size
      bstream.readNumber(4); // uncompressed size
      const fileNameLength = bstream.readNumber(2); // file name length
      const extraFieldLength = bstream.readNumber(2); // extra field length
      const fileCommentLength = bstream.readNumber(2); // file comment length
      bstream.readNumber(2); // disk number start
      bstream.readNumber(2); // internal file attributes
      bstream.readNumber(4); // external file attributes
      bstream.readNumber(4); // relative offset of local header

      bstream.readString(fileNameLength); // file name
      bstream.readString(extraFieldLength); // extra field
      bstream.readString(fileCommentLength); // file comment
    }
  }

  // digital signature
  if (bstream.peekNumber(4) == zDigitalSignatureSignature) {
    if (logToConsole) {
      info(" Found a Digital Signature");
    }
    bstream.readNumber(4);
    const sizeOfSignature = bstream.readNumber(2);
    bstream.readString(sizeOfSignature); // digital signature data
  }

  postProgress();

  bytestream = bstream.tee();
}

// event.data.file has the first ArrayBuffer.
// event.data.bytes has all subsequent ArrayBuffers.
onmessage = function (event) {
  const bytes = event.data.file || event.data.bytes;
  logToConsole = !!event.data.logToConsole;

  // This is the very first time we have been called. Initialize the bytestream.
  if (!bytestream) {
    bytestream = new bitjs.io.ByteStream(bytes);
  } else {
    bytestream.push(bytes);
  }

  if (unarchiveState === UnarchiveState.NOT_STARTED) {
    currentFilename = "";
    currentFileNumber = 0;
    currentBytesUnarchivedInFile = 0;
    currentBytesUnarchived = 0;
    totalUncompressedBytesInArchive = 0;
    totalFilesInArchive = 0;
    currentBytesUnarchived = 0;
    allLocalFiles = [];

    postMessage(new bitjs.archive.UnarchiveStartEvent());

    unarchiveState = UnarchiveState.UNARCHIVING;

    postProgress();
  }

  if (unarchiveState === UnarchiveState.UNARCHIVING ||
      unarchiveState === UnarchiveState.WAITING) {
    try {
      unzip();
      unarchiveState = UnarchiveState.FINISHED;
      postMessage(new bitjs.archive.UnarchiveFinishEvent());
    } catch (e) {
      if (typeof e === 'string' && e.startsWith('Error! Overflowed')) {
        // Overrun the buffer.
        unarchiveState = UnarchiveState.WAITING;
      } else {
        console.error('Found an error while unzipping');
        console.dir(e);
        throw e;
      }
    }
  }
};
cps/static/js/io/bitstream.js (new file, 0 → 100644)
/*
 * bitstream.js
 *
 * Provides readers for bitstreams.
 *
 * Licensed under the MIT License
 *
 * Copyright(c) 2011 Google Inc.
 * Copyright(c) 2011 antimatter15
 */

var bitjs = bitjs || {};
bitjs.io = bitjs.io || {};

/**
 * This object allows you to peek and consume bits and bytes out of a stream.
 * Note that this stream is optimized, and thus, will *NOT* throw an error if
 * the end of the stream is reached.  Only use this in scenarios where you
 * already have all the bits you need.
 */
bitjs.io.BitStream = class {
  /**
   * @param {ArrayBuffer} ab An ArrayBuffer object or a Uint8Array.
   * @param {boolean} rtl Whether the stream reads bits from the byte starting
   *     from bit 7 to 0 (true) or bit 0 to 7 (false).
   * @param {Number} opt_offset The offset into the ArrayBuffer
   * @param {Number} opt_length The length of this BitStream
   */
  constructor(ab, rtl, opt_offset, opt_length) {
    if (!(ab instanceof ArrayBuffer)) {
      throw 'Error! BitArray constructed with an invalid ArrayBuffer object';
    }

    const offset = opt_offset || 0;
    const length = opt_length || ab.byteLength;

    /**
     * The bytes in the stream.
     * @type {Uint8Array}
     * @private
     */
    this.bytes = new Uint8Array(ab, offset, length);

    /**
     * The byte in the stream that we are currently on.
     * @type {Number}
     * @private
     */
    this.bytePtr = 0;

    /**
     * The bit in the current byte that we will read next (can have values 0 through 7).
     * @type {Number}
     * @private
     */
    this.bitPtr = 0; // tracks which bit we are on (can have values 0 through 7)

    /**
     * An ever-increasing number.
     * @type {Number}
     * @private
     */
    this.bitsRead_ = 0;

    this.peekBits = rtl ? this.peekBits_rtl : this.peekBits_ltr;
  }

  /**
   * Returns how many bits have been read in the stream since the beginning of time.
   */
  getNumBitsRead() {
    return this.bitsRead_;
  }

  /**
   * Returns how many bits are currently in the stream left to be read.
   */
  getNumBitsLeft() {
    const bitsLeftInByte = 8 - this.bitPtr;
    return (this.bytes.byteLength - this.bytePtr - 1) * 8 + bitsLeftInByte;
  }

  /**
   *   byte0      byte1      byte2      byte3
   * 7......0 | 7......0 | 7......0 | 7......0
   *
   * The bit pointer starts at bit0 of byte0 and moves left until it reaches
   * bit7 of byte0, then jumps to bit0 of byte1, etc.
   * @param {number} n The number of bits to peek, must be a positive integer.
   * @param {boolean=} movePointers Whether to move the pointer, defaults false.
   * @return {number} The peeked bits, as an unsigned number.
   */
  peekBits_ltr(n, opt_movePointers) {
    const NUM = parseInt(n, 10);
    let num = NUM;
    if (n !== num || num <= 0) {
      return 0;
    }

    const BITMASK = bitjs.io.BitStream.BITMASK;
    const movePointers = opt_movePointers || false;
    let bytes = this.bytes;
    let bytePtr = this.bytePtr;
    let bitPtr = this.bitPtr;
    let result = 0;
    let bitsIn = 0;

    // keep going until we have no more bits left to peek at
    while (num > 0) {
      // We overflowed the stream, so just return what we got.
      if (bytePtr >= bytes.length) {
        break;
      }

      const numBitsLeftInThisByte = (8 - bitPtr);
      if (num >= numBitsLeftInThisByte) {
        const mask = (BITMASK[numBitsLeftInThisByte] << bitPtr);
        result |= (((bytes[bytePtr] & mask) >> bitPtr) << bitsIn);

        bytePtr++;
        bitPtr = 0;
        bitsIn += numBitsLeftInThisByte;
        num -= numBitsLeftInThisByte;
      } else {
        const mask = (BITMASK[num] << bitPtr);
        result |= (((bytes[bytePtr] & mask) >> bitPtr) << bitsIn);

        bitPtr += num;
        break;
      }
    }

    if (movePointers) {
      this.bitPtr = bitPtr;
      this.bytePtr = bytePtr;
      this.bitsRead_ += NUM;
    }

    return result;
  }
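  // -------------------------------------------------------------------------
  // Editorial note (not part of the original bitstream.js): with a single byte
  // 0b10110010 and bitPtr = 0, peekBits_ltr(3) masks the three lowest bits and
  // returns 0b010 = 2; after readBits(3), another peekBits_ltr(3) returns
  // 0b110 = 6 (bits 3-5 of the same byte). The concrete byte value is an
  // assumption chosen for illustration.
  // -------------------------------------------------------------------------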
  /**
   *   byte0      byte1      byte2      byte3
   * 7......0 | 7......0 | 7......0 | 7......0
   *
   * The bit pointer starts at bit7 of byte0 and moves right until it reaches
   * bit0 of byte0, then goes to bit7 of byte1, etc.
   * @param {number} n The number of bits to peek.  Must be a positive integer.
   * @param {boolean=} movePointers Whether to move the pointer, defaults false.
   * @return {number} The peeked bits, as an unsigned number.
   */
  peekBits_rtl(n, opt_movePointers) {
    const NUM = parseInt(n, 10);
    let num = NUM;
    if (n !== num || num <= 0) {
      return 0;
    }

    const BITMASK = bitjs.io.BitStream.BITMASK;
    const movePointers = opt_movePointers || false;
    let bytes = this.bytes;
    let bytePtr = this.bytePtr;
    let bitPtr = this.bitPtr;
    let result = 0;

    // keep going until we have no more bits left to peek at
    while (num > 0) {
      // We overflowed the stream, so just return the bits we got.
      if (bytePtr >= bytes.length) {
        break;
      }

      const numBitsLeftInThisByte = (8 - bitPtr);
      if (num >= numBitsLeftInThisByte) {
        result <<= numBitsLeftInThisByte;
        result |= (BITMASK[numBitsLeftInThisByte] & bytes[bytePtr]);
        bytePtr++;
        bitPtr = 0;
        num -= numBitsLeftInThisByte;
      } else {
        result <<= num;
        const numBits = 8 - num - bitPtr;
        result |= ((bytes[bytePtr] & (BITMASK[num] << numBits)) >> numBits);

        bitPtr += num;
        break;
      }
    }

    if (movePointers) {
      this.bitPtr = bitPtr;
      this.bytePtr = bytePtr;
      this.bitsRead_ += NUM;
    }

    return result;
  }

  /**
   * Peek at 16 bits from current position in the buffer.
   * Bit at (bytePtr,bitPtr) has the highest position in returning data.
   * Taken from getbits.hpp in unrar.
   * TODO: Move this out of BitStream and into unrar.
   */
  getBits() {
    return (((((this.bytes[this.bytePtr] & 0xff) << 16) +
        ((this.bytes[this.bytePtr + 1] & 0xff) << 8) +
        ((this.bytes[this.bytePtr + 2] & 0xff))) >>> (8 - this.bitPtr)) & 0xffff);
  }

  /**
   * Reads n bits out of the stream, consuming them (moving the bit pointer).
   * @param {number} n The number of bits to read.  Must be a positive integer.
   * @return {number} The read bits, as an unsigned number.
   */
  readBits(n) {
    return this.peekBits(n, true);
  }

  /**
   * This returns n bytes as a sub-array, advancing the pointer if movePointers
   * is true.  Only use this for uncompressed blocks as this throws away remaining
   * bits in the current byte.
   * @param {number} n The number of bytes to peek.  Must be a positive integer.
   * @param {boolean=} movePointers Whether to move the pointer, defaults false.
   * @return {Uint8Array} The subarray.
   */
  peekBytes(n, opt_movePointers) {
    const num = parseInt(n, 10);
    if (n !== num || num < 0) {
      throw 'Error! Called peekBytes() with a non-positive integer: ' + n;
    } else if (num === 0) {
      return new Uint8Array();
    }

    // Flush bits until we are byte-aligned.
    // from http://tools.ietf.org/html/rfc1951#page-11
    // "Any bits of input up to the next byte boundary are ignored."
    while (this.bitPtr != 0) {
      this.readBits(1);
    }

    const numBytesLeft = this.getNumBitsLeft() / 8;
    if (num > numBytesLeft) {
      throw 'Error! Overflowed the bit stream! n=' + num + ', bytePtr=' + this.bytePtr +
          ', bytes.length=' + this.bytes.length + ', bitPtr=' + this.bitPtr;
    }

    const movePointers = opt_movePointers || false;
    const result = new Uint8Array(num);
    let bytes = this.bytes;
    let ptr = this.bytePtr;
    let bytesLeftToCopy = num;
    while (bytesLeftToCopy > 0) {
      const bytesLeftInStream = bytes.length - ptr;
      const sourceLength = Math.min(bytesLeftToCopy, bytesLeftInStream);

      result.set(bytes.subarray(ptr, ptr + sourceLength), num - bytesLeftToCopy);

      ptr += sourceLength;
      // Overflowed the stream, just return what we got.
if
(
ptr
>=
bytes
.
length
)
{
break
;
}
bytesLeftToCopy
-=
sourceLength
;
}
if
(
movePointers
)
{
this
.
bytePtr
+=
num
;
this
.
bitsRead_
+=
(
num
*
8
);
}
return
result
;
}
/**
* @param {number} n The number of bytes to read.
* @return {Uint8Array} The subarray.
*/
readBytes
(
n
)
{
return
this
.
peekBytes
(
n
,
true
);
}
}
// mask for getting N number of bits (0-8)
bitjs
.
io
.
BitStream
.
BITMASK
=
[
0
,
0x01
,
0x03
,
0x07
,
0x0F
,
0x1F
,
0x3F
,
0x7F
,
0xFF
];
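The BITMASK table above is what both peek implementations use to pull up to eight bits at a time out of the current byte. To make the ltr/rtl distinction concrete, here is a minimal usage sketch (illustration only, not part of this commit; the byte values are made up):

// Illustration only: the same leading byte read in both bit orders.
var ab = new Uint8Array([0xB4, 0xFF]).buffer;   // 0xB4 = 0b10110100

var ltr = new bitjs.io.BitStream(ab, false /* rtl */);
console.log(ltr.readBits(4));   // 4  -- LTR starts at bit 0, so the low nibble 0b0100

var rtl = new bitjs.io.BitStream(ab, true /* rtl */);
console.log(rtl.readBits(4));   // 11 -- RTL starts at bit 7, so the high nibble 0b1011

The unzip worker builds its streams with rtl = false (DEFLATE packs bits LSB-first), while the unrar worker unpacks file data with rtl = true.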
cps/static/js/io/bytebuffer.js
0 → 100644
/*
 * bytebuffer.js
 *
 * Provides a writer for bytes.
 *
 * Licensed under the MIT License
 *
 * Copyright(c) 2011 Google Inc.
 * Copyright(c) 2011 antimatter15
 */

var bitjs = bitjs || {};
bitjs.io = bitjs.io || {};

/**
 * A write-only Byte buffer which uses a Uint8 Typed Array as a backing store.
 */
bitjs.io.ByteBuffer = class {
    /**
     * @param {number} numBytes The number of bytes to allocate.
     */
    constructor(numBytes) {
        if (typeof numBytes != typeof 1 || numBytes <= 0) {
            throw "Error! ByteBuffer initialized with '" + numBytes + "'";
        }
        this.data = new Uint8Array(numBytes);
        this.ptr = 0;
    }

    /**
     * @param {number} b The byte to insert.
     */
    insertByte(b) {
        // TODO: throw if byte is invalid?
        this.data[this.ptr++] = b;
    }

    /**
     * @param {Array.<number>|Uint8Array|Int8Array} bytes The bytes to insert.
     */
    insertBytes(bytes) {
        // TODO: throw if bytes is invalid?
        this.data.set(bytes, this.ptr);
        this.ptr += bytes.length;
    }

    /**
     * Writes an unsigned number into the next n bytes. If the number is too large
     * to fit into n bytes or is negative, an error is thrown.
     * @param {number} num The unsigned number to write.
     * @param {number} numBytes The number of bytes to write the number into.
     */
    writeNumber(num, numBytes) {
        if (numBytes < 1 || !numBytes) {
            throw 'Trying to write into too few bytes: ' + numBytes;
        }
        if (num < 0) {
            throw 'Trying to write a negative number (' + num + ') as an unsigned number to an ArrayBuffer';
        }
        if (num > (Math.pow(2, numBytes * 8) - 1)) {
            throw 'Trying to write ' + num + ' into only ' + numBytes + ' bytes';
        }

        // Roll 8-bits at a time into an array of bytes.
        const bytes = [];
        while (numBytes-- > 0) {
            const eightBits = num & 255;
            bytes.push(eightBits);
            num >>= 8;
        }

        this.insertBytes(bytes);
    }

    /**
     * Writes a signed number into the next n bytes. If the number is too large
     * to fit into n bytes, an error is thrown.
     * @param {number} num The signed number to write.
     * @param {number} numBytes The number of bytes to write the number into.
     */
    writeSignedNumber(num, numBytes) {
        if (numBytes < 1) {
            throw 'Trying to write into too few bytes: ' + numBytes;
        }

        const HALF = Math.pow(2, (numBytes * 8) - 1);
        if (num >= HALF || num < -HALF) {
            throw 'Trying to write ' + num + ' into only ' + numBytes + ' bytes';
        }

        // Roll 8-bits at a time into an array of bytes.
        const bytes = [];
        while (numBytes-- > 0) {
            const eightBits = num & 255;
            bytes.push(eightBits);
            num >>= 8;
        }

        this.insertBytes(bytes);
    }

    /**
     * @param {string} str The ASCII string to write.
     */
    writeASCIIString(str) {
        for (let i = 0; i < str.length; ++i) {
            const curByte = str.charCodeAt(i);
            if (curByte < 0 || curByte > 255) {
                throw 'Trying to write a non-ASCII string!';
            }
            this.insertByte(curByte);
        }
    };
}
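One detail of writeNumber()/writeSignedNumber() above that is easy to miss: the byte-rolling loop emits the low byte first, so multi-byte values land in the buffer little-endian. A small sketch (illustration only, arbitrary values, not part of this commit):

// Illustration only.
var buf = new bitjs.io.ByteBuffer(6);
buf.writeASCIIString("PK");     // 0x50, 0x4B
buf.writeNumber(0x0403, 2);     // stored low byte first: 0x03, 0x04
buf.writeSignedNumber(-1, 2);   // two's complement: 0xFF, 0xFF
console.log(buf.data);          // Uint8Array [80, 75, 3, 4, 255, 255]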
cps/static/js/bytestream.js → cps/static/js/io/bytestream.js
File moved
cps/static/js/unrar.js
deleted 100644 → 0
/**
* unrar.js
*
* Copyright(c) 2011 Google Inc.
* Copyright(c) 2011 antimatter15
*
* Reference Documentation:
*
* http://kthoom.googlecode.com/hg/docs/unrar.html
*/
/* global bitjs, importScripts */
// This file expects to be invoked as a Worker (see onmessage below).
importScripts
(
"io.js"
);
importScripts
(
"archive.js"
);
// Progress variables.
var
currentFilename
=
""
;
var
currentFileNumber
=
0
;
var
currentBytesUnarchivedInFile
=
0
;
var
currentBytesUnarchived
=
0
;
var
totalUncompressedBytesInArchive
=
0
;
var
totalFilesInArchive
=
0
;
// Helper functions.
var
info
=
function
(
str
)
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveInfoEvent
(
str
));
};
var
err
=
function
(
str
)
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveErrorEvent
(
str
));
};
var
postProgress
=
function
()
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveProgressEvent
(
currentFilename
,
currentFileNumber
,
currentBytesUnarchivedInFile
,
currentBytesUnarchived
,
totalUncompressedBytesInArchive
,
totalFilesInArchive
));
};
// shows a byte value as its hex representation
var
nibble
=
"0123456789ABCDEF"
;
var
byteValueToHexString
=
function
(
num
)
{
return
nibble
[
num
>>
4
]
+
nibble
[
num
&
0xF
];
};
var
twoByteValueToHexString
=
function
(
num
)
{
return
nibble
[(
num
>>
12
)
&
0xF
]
+
nibble
[(
num
>>
8
)
&
0xF
]
+
nibble
[(
num
>>
4
)
&
0xF
]
+
nibble
[
num
&
0xF
];
};
// Volume Types
// MARK_HEAD = 0x72;
var
MAIN_HEAD
=
0x73
,
FILE_HEAD
=
0x74
,
// COMM_HEAD = 0x75,
// AV_HEAD = 0x76,
// SUB_HEAD = 0x77,
// PROTECT_HEAD = 0x78,
// SIGN_HEAD = 0x79,
// NEWSUB_HEAD = 0x7a,
ENDARC_HEAD
=
0x7b
;
// bstream is a bit stream
var
RarVolumeHeader
=
function
(
bstream
)
{
var
headPos
=
bstream
.
bytePtr
;
// byte 1,2
info
(
"Rar Volume Header @"
+
bstream
.
bytePtr
);
this
.
crc
=
bstream
.
readBits
(
16
);
info
(
" crc="
+
this
.
crc
);
// byte 3
this
.
headType
=
bstream
.
readBits
(
8
);
info
(
" headType="
+
this
.
headType
);
// Get flags
// bytes 4,5
this
.
flags
=
{};
this
.
flags
.
value
=
bstream
.
peekBits
(
16
);
info
(
" flags="
+
twoByteValueToHexString
(
this
.
flags
.
value
));
switch
(
this
.
headType
)
{
case
MAIN_HEAD
:
this
.
flags
.
MHD_VOLUME
=
!!
bstream
.
readBits
(
1
);
this
.
flags
.
MHD_COMMENT
=
!!
bstream
.
readBits
(
1
);
this
.
flags
.
MHD_LOCK
=
!!
bstream
.
readBits
(
1
);
this
.
flags
.
MHD_SOLID
=
!!
bstream
.
readBits
(
1
);
this
.
flags
.
MHD_PACK_COMMENT
=
!!
bstream
.
readBits
(
1
);
this
.
flags
.
MHD_NEWNUMBERING
=
this
.
flags
.
MHD_PACK_COMMENT
;
this
.
flags
.
MHD_AV
=
!!
bstream
.
readBits
(
1
);
this
.
flags
.
MHD_PROTECT
=
!!
bstream
.
readBits
(
1
);
this
.
flags
.
MHD_PASSWORD
=
!!
bstream
.
readBits
(
1
);
this
.
flags
.
MHD_FIRSTVOLUME
=
!!
bstream
.
readBits
(
1
);
this
.
flags
.
MHD_ENCRYPTVER
=
!!
bstream
.
readBits
(
1
);
bstream
.
readBits
(
6
);
// unused
break
;
case
FILE_HEAD
:
this
.
flags
.
LHD_SPLIT_BEFORE
=
!!
bstream
.
readBits
(
1
);
// 0x0001
this
.
flags
.
LHD_SPLIT_AFTER
=
!!
bstream
.
readBits
(
1
);
// 0x0002
this
.
flags
.
LHD_PASSWORD
=
!!
bstream
.
readBits
(
1
);
// 0x0004
this
.
flags
.
LHD_COMMENT
=
!!
bstream
.
readBits
(
1
);
// 0x0008
this
.
flags
.
LHD_SOLID
=
!!
bstream
.
readBits
(
1
);
// 0x0010
bstream
.
readBits
(
3
);
// unused
this
.
flags
.
LHD_LARGE
=
!!
bstream
.
readBits
(
1
);
// 0x0100
this
.
flags
.
LHD_UNICODE
=
!!
bstream
.
readBits
(
1
);
// 0x0200
this
.
flags
.
LHD_SALT
=
!!
bstream
.
readBits
(
1
);
// 0x0400
this
.
flags
.
LHD_VERSION
=
!!
bstream
.
readBits
(
1
);
// 0x0800
this
.
flags
.
LHD_EXTTIME
=
!!
bstream
.
readBits
(
1
);
// 0x1000
this
.
flags
.
LHD_EXTFLAGS
=
!!
bstream
.
readBits
(
1
);
// 0x2000
bstream
.
readBits
(
2
);
// unused
info
(
" LHD_SPLIT_BEFORE = "
+
this
.
flags
.
LHD_SPLIT_BEFORE
);
break
;
default
:
bstream
.
readBits
(
16
);
}
// byte 6,7
this
.
headSize
=
bstream
.
readBits
(
16
);
info
(
" headSize="
+
this
.
headSize
);
switch
(
this
.
headType
)
{
case
MAIN_HEAD
:
this
.
highPosAv
=
bstream
.
readBits
(
16
);
this
.
posAv
=
bstream
.
readBits
(
32
);
if
(
this
.
flags
.
MHD_ENCRYPTVER
)
{
this
.
encryptVer
=
bstream
.
readBits
(
8
);
}
info
(
"Found MAIN_HEAD with highPosAv="
+
this
.
highPosAv
+
", posAv="
+
this
.
posAv
);
break
;
case
FILE_HEAD
:
this
.
packSize
=
bstream
.
readBits
(
32
);
this
.
unpackedSize
=
bstream
.
readBits
(
32
);
this
.
hostOS
=
bstream
.
readBits
(
8
);
this
.
fileCRC
=
bstream
.
readBits
(
32
);
this
.
fileTime
=
bstream
.
readBits
(
32
);
this
.
unpVer
=
bstream
.
readBits
(
8
);
this
.
method
=
bstream
.
readBits
(
8
);
this
.
nameSize
=
bstream
.
readBits
(
16
);
this
.
fileAttr
=
bstream
.
readBits
(
32
);
if
(
this
.
flags
.
LHD_LARGE
)
{
info
(
"Warning: Reading in LHD_LARGE 64-bit size values"
);
this
.
HighPackSize
=
bstream
.
readBits
(
32
);
this
.
HighUnpSize
=
bstream
.
readBits
(
32
);
}
else
{
this
.
HighPackSize
=
0
;
this
.
HighUnpSize
=
0
;
if
(
this
.
unpackedSize
===
0xffffffff
)
{
this
.
HighUnpSize
=
0x7fffffff
;
this
.
unpackedSize
=
0xffffffff
;
}
}
this
.
fullPackSize
=
0
;
this
.
fullUnpackSize
=
0
;
this
.
fullPackSize
|=
this
.
HighPackSize
;
this
.
fullPackSize
<<=
32
;
this
.
fullPackSize
|=
this
.
packSize
;
// read in filename
this
.
filename
=
bstream
.
readBytes
(
this
.
nameSize
);
var
_s
=
""
;
for
(
var
_i
=
0
;
_i
<
this
.
filename
.
length
;
_i
++
)
{
_s
+=
String
.
fromCharCode
(
this
.
filename
[
_i
]);
}
this
.
filename
=
_s
;
if
(
this
.
flags
.
LHD_SALT
)
{
info
(
"Warning: Reading in 64-bit salt value"
);
this
.
salt
=
bstream
.
readBits
(
64
);
// 8 bytes
}
if
(
this
.
flags
.
LHD_EXTTIME
)
{
// 16-bit flags
var
extTimeFlags
=
bstream
.
readBits
(
16
);
// this is adapted straight out of arcread.cpp, Archive::ReadHeader()
for
(
var
I
=
0
;
I
<
4
;
++
I
)
{
var
rmode
=
extTimeFlags
>>
((
3
-
I
)
*
4
);
if
((
rmode
&
8
)
===
0
)
{
continue
;
}
if
(
I
!==
0
)
{
bstream
.
readBits
(
16
);
}
var
count
=
(
rmode
&
3
);
for
(
var
J
=
0
;
J
<
count
;
++
J
)
{
bstream
.
readBits
(
8
);
}
}
}
if
(
this
.
flags
.
LHD_COMMENT
)
{
info
(
"Found a LHD_COMMENT"
);
}
while
(
headPos
+
this
.
headSize
>
bstream
.
bytePtr
)
bstream
.
readBits
(
1
);
info
(
"Found FILE_HEAD with packSize="
+
this
.
packSize
+
", unpackedSize= "
+
this
.
unpackedSize
+
", hostOS="
+
this
.
hostOS
+
", unpVer="
+
this
.
unpVer
+
", method="
+
this
.
method
+
", filename="
+
this
.
filename
);
break
;
default
:
info
(
"Found a header of type 0x"
+
byteValueToHexString
(
this
.
headType
));
// skip the rest of the header bytes (for now)
bstream
.
readBytes
(
this
.
headSize
-
7
);
break
;
}
};
//var BLOCK_LZ = 0;
var
rLDecode
=
[
0
,
1
,
2
,
3
,
4
,
5
,
6
,
7
,
8
,
10
,
12
,
14
,
16
,
20
,
24
,
28
,
32
,
40
,
48
,
56
,
64
,
80
,
96
,
112
,
128
,
160
,
192
,
224
],
rLBits
=
[
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
1
,
1
,
1
,
1
,
2
,
2
,
2
,
2
,
3
,
3
,
3
,
3
,
4
,
4
,
4
,
4
,
5
,
5
,
5
,
5
],
rDBitLengthCounts
=
[
4
,
2
,
2
,
2
,
2
,
2
,
2
,
2
,
2
,
2
,
2
,
2
,
2
,
2
,
2
,
2
,
14
,
0
,
12
],
rSDDecode
=
[
0
,
4
,
8
,
16
,
32
,
64
,
128
,
192
],
rSDBits
=
[
2
,
2
,
3
,
4
,
5
,
6
,
6
,
6
];
var
rDDecode
=
[
0
,
1
,
2
,
3
,
4
,
6
,
8
,
12
,
16
,
24
,
32
,
48
,
64
,
96
,
128
,
192
,
256
,
384
,
512
,
768
,
1024
,
1536
,
2048
,
3072
,
4096
,
6144
,
8192
,
12288
,
16384
,
24576
,
32768
,
49152
,
65536
,
98304
,
131072
,
196608
,
262144
,
327680
,
393216
,
458752
,
524288
,
589824
,
655360
,
720896
,
786432
,
851968
,
917504
,
983040
];
var
rDBits
=
[
0
,
0
,
0
,
0
,
1
,
1
,
2
,
2
,
3
,
3
,
4
,
4
,
5
,
5
,
6
,
6
,
7
,
7
,
8
,
8
,
9
,
9
,
10
,
10
,
11
,
11
,
12
,
12
,
13
,
13
,
14
,
14
,
15
,
15
,
16
,
16
,
16
,
16
,
16
,
16
,
16
,
16
,
16
,
16
,
16
,
16
,
16
,
16
];
var
rLowDistRepCount
=
16
;
var
rNC
=
299
,
rDC
=
60
,
rLDC
=
17
,
rRC
=
28
,
rBC
=
20
,
rHuffTableSize
=
(
rNC
+
rDC
+
rRC
+
rLDC
);
//var UnpBlockType = BLOCK_LZ;
var
UnpOldTable
=
new
Array
(
rHuffTableSize
);
var
BD
=
{
//bitdecode
DecodeLen
:
new
Array
(
16
),
DecodePos
:
new
Array
(
16
),
DecodeNum
:
new
Array
(
rBC
)
};
var
LD
=
{
//litdecode
DecodeLen
:
new
Array
(
16
),
DecodePos
:
new
Array
(
16
),
DecodeNum
:
new
Array
(
rNC
)
};
var
DD
=
{
//distdecode
DecodeLen
:
new
Array
(
16
),
DecodePos
:
new
Array
(
16
),
DecodeNum
:
new
Array
(
rDC
)
};
var
LDD
=
{
//low dist decode
DecodeLen
:
new
Array
(
16
),
DecodePos
:
new
Array
(
16
),
DecodeNum
:
new
Array
(
rLDC
)
};
var
RD
=
{
//rep decode
DecodeLen
:
new
Array
(
16
),
DecodePos
:
new
Array
(
16
),
DecodeNum
:
new
Array
(
rRC
)
};
var
rBuffer
;
// read in Huffman tables for RAR
function
rarReadTables
(
bstream
)
{
var
BitLength
=
new
Array
(
rBC
),
Table
=
new
Array
(
rHuffTableSize
);
var
i
;
// before we start anything we need to get byte-aligned
bstream
.
readBits
(
(
8
-
bstream
.
bitPtr
)
&
0x7
);
if
(
bstream
.
readBits
(
1
))
{
info
(
"Error! PPM not implemented yet"
);
return
;
}
if
(
!
bstream
.
readBits
(
1
))
{
//discard old table
for
(
i
=
UnpOldTable
.
length
;
i
--
;)
UnpOldTable
[
i
]
=
0
;
}
// read in bit lengths
for
(
var
I
=
0
;
I
<
rBC
;
++
I
)
{
var
Length
=
bstream
.
readBits
(
4
);
if
(
Length
===
15
)
{
var
ZeroCount
=
bstream
.
readBits
(
4
);
if
(
ZeroCount
===
0
)
{
BitLength
[
I
]
=
15
;
}
else
{
ZeroCount
+=
2
;
while
(
ZeroCount
--
>
0
&&
I
<
rBC
)
{
BitLength
[
I
++
]
=
0
;
}
--
I
;
}
}
else
{
BitLength
[
I
]
=
Length
;
}
}
// now all 20 bit lengths are obtained, we construct the Huffman Table:
rarMakeDecodeTables
(
BitLength
,
0
,
BD
,
rBC
);
var
TableSize
=
rHuffTableSize
;
//console.log(DecodeLen, DecodePos, DecodeNum);
for
(
i
=
0
;
i
<
TableSize
;)
{
var
N
;
var
num
=
rarDecodeNumber
(
bstream
,
BD
);
if
(
num
<
16
)
{
Table
[
i
]
=
(
num
+
UnpOldTable
[
i
])
&
0xf
;
i
++
;
}
else
if
(
num
<
18
)
{
N
=
(
num
===
16
)
?
(
bstream
.
readBits
(
3
)
+
3
)
:
(
bstream
.
readBits
(
7
)
+
11
);
while
(
N
--
>
0
&&
i
<
TableSize
)
{
Table
[
i
]
=
Table
[
i
-
1
];
i
++
;
}
}
else
{
N
=
(
num
===
18
)
?
(
bstream
.
readBits
(
3
)
+
3
)
:
(
bstream
.
readBits
(
7
)
+
11
);
while
(
N
--
>
0
&&
i
<
TableSize
)
{
Table
[
i
++
]
=
0
;
}
}
}
rarMakeDecodeTables
(
Table
,
0
,
LD
,
rNC
);
rarMakeDecodeTables
(
Table
,
rNC
,
DD
,
rDC
);
rarMakeDecodeTables
(
Table
,
rNC
+
rDC
,
LDD
,
rLDC
);
rarMakeDecodeTables
(
Table
,
rNC
+
rDC
+
rLDC
,
RD
,
rRC
);
for
(
i
=
UnpOldTable
.
length
;
i
--
;)
{
UnpOldTable
[
i
]
=
Table
[
i
];
}
return
true
;
}
function
rarDecodeNumber
(
bstream
,
dec
)
{
var
DecodeLen
=
dec
.
DecodeLen
,
DecodePos
=
dec
.
DecodePos
,
DecodeNum
=
dec
.
DecodeNum
;
var
bitField
=
bstream
.
getBits
()
&
0xfffe
;
//some sort of rolled out binary search
var
bits
=
((
bitField
<
DecodeLen
[
8
])
?
((
bitField
<
DecodeLen
[
4
])
?
((
bitField
<
DecodeLen
[
2
])
?
((
bitField
<
DecodeLen
[
1
])
?
1
:
2
)
:
((
bitField
<
DecodeLen
[
3
])
?
3
:
4
))
:
(
bitField
<
DecodeLen
[
6
])
?
((
bitField
<
DecodeLen
[
5
])
?
5
:
6
)
:
((
bitField
<
DecodeLen
[
7
])
?
7
:
8
))
:
((
bitField
<
DecodeLen
[
12
])
?
((
bitField
<
DecodeLen
[
10
])
?
((
bitField
<
DecodeLen
[
9
])
?
9
:
10
)
:
((
bitField
<
DecodeLen
[
11
])
?
11
:
12
))
:
(
bitField
<
DecodeLen
[
14
])
?
((
bitField
<
DecodeLen
[
13
])
?
13
:
14
)
:
15
));
bstream
.
readBits
(
bits
);
var
N
=
DecodePos
[
bits
]
+
((
bitField
-
DecodeLen
[
bits
-
1
])
>>>
(
16
-
bits
));
return
DecodeNum
[
N
];
}
function
rarMakeDecodeTables
(
BitLength
,
offset
,
dec
,
size
)
{
var
DecodeLen
=
dec
.
DecodeLen
,
DecodePos
=
dec
.
DecodePos
,
DecodeNum
=
dec
.
DecodeNum
;
var
LenCount
=
[
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
],
TmpPos
=
[
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
],
N
=
0
,
M
=
0
;
var
i
;
for
(
i
=
DecodeNum
.
length
;
i
--
;)
DecodeNum
[
i
]
=
0
;
for
(
i
=
0
;
i
<
size
;
i
++
)
{
LenCount
[
BitLength
[
i
+
offset
]
&
0xF
]
++
;
}
LenCount
[
0
]
=
0
;
TmpPos
[
0
]
=
0
;
DecodePos
[
0
]
=
0
;
DecodeLen
[
0
]
=
0
;
var
I
;
for
(
I
=
1
;
I
<
16
;
++
I
)
{
N
=
2
*
(
N
+
LenCount
[
I
]);
M
=
(
N
<<
(
15
-
I
));
if
(
M
>
0xFFFF
)
{
M
=
0xFFFF
;
}
DecodeLen
[
I
]
=
M
;
DecodePos
[
I
]
=
DecodePos
[
I
-
1
]
+
LenCount
[
I
-
1
];
TmpPos
[
I
]
=
DecodePos
[
I
];
}
for
(
I
=
0
;
I
<
size
;
++
I
)
{
if
(
BitLength
[
I
+
offset
]
!==
0
)
{
DecodeNum
[
TmpPos
[
BitLength
[
offset
+
I
]
&
0xF
]
++
]
=
I
;
}
}
}
// TODO: implement
function
Unpack15
()
{
//bstream, Solid) {
info
(
"ERROR! RAR 1.5 compression not supported"
);
}
var
lowDistRepCount
=
0
,
prevLowDist
=
0
;
var
rOldDist
=
[
0
,
0
,
0
,
0
];
var
lastDist
=
0
;
var
lastLength
=
0
;
function
Unpack20
(
bstream
)
{
//, Solid) {
var
destUnpSize
=
rBuffer
.
data
.
length
;
var
oldDistPtr
=
0
;
var
Length
;
var
Distance
;
rarReadTables20
(
bstream
);
while
(
destUnpSize
>
rBuffer
.
ptr
)
{
var
num
=
rarDecodeNumber
(
bstream
,
LD
);
var
Bits
;
if
(
num
<
256
)
{
rBuffer
.
insertByte
(
num
);
continue
;
}
if
(
num
>
269
)
{
Length
=
rLDecode
[
num
-=
270
]
+
3
;
if
((
Bits
=
rLBits
[
num
])
>
0
)
{
Length
+=
bstream
.
readBits
(
Bits
);
}
var
DistNumber
=
rarDecodeNumber
(
bstream
,
DD
);
Distance
=
rDDecode
[
DistNumber
]
+
1
;
if
((
Bits
=
rDBits
[
DistNumber
])
>
0
)
{
Distance
+=
bstream
.
readBits
(
Bits
);
}
if
(
Distance
>=
0x2000
)
{
Length
++
;
if
(
Distance
>=
0x40000
)
Length
++
;
}
lastLength
=
Length
;
lastDist
=
rOldDist
[
oldDistPtr
++
&
3
]
=
Distance
;
rarCopyString
(
Length
,
Distance
);
continue
;
}
if
(
num
===
269
)
{
rarReadTables20
(
bstream
);
rarUpdateProgress
();
continue
;
}
if
(
num
===
256
)
{
lastDist
=
rOldDist
[
oldDistPtr
++
&
3
]
=
lastDist
;
rarCopyString
(
lastLength
,
lastDist
);
continue
;
}
if
(
num
<
261
)
{
Distance
=
rOldDist
[(
oldDistPtr
-
(
num
-
256
))
&
3
];
var
LengthNumber
=
rarDecodeNumber
(
bstream
,
RD
);
Length
=
rLDecode
[
LengthNumber
]
+
2
;
if
((
Bits
=
rLBits
[
LengthNumber
])
>
0
)
{
Length
+=
bstream
.
readBits
(
Bits
);
}
if
(
Distance
>=
0x101
)
{
Length
++
;
if
(
Distance
>=
0x2000
)
{
Length
++
;
if
(
Distance
>=
0x40000
)
Length
++
;
}
}
lastLength
=
Length
;
lastDist
=
rOldDist
[
oldDistPtr
++
&
3
]
=
Distance
;
rarCopyString
(
Length
,
Distance
);
continue
;
}
if
(
num
<
270
)
{
Distance
=
rSDDecode
[
num
-=
261
]
+
1
;
if
((
Bits
=
rSDBits
[
num
])
>
0
)
{
Distance
+=
bstream
.
readBits
(
Bits
);
}
lastLength
=
2
;
lastDist
=
rOldDist
[
oldDistPtr
++
&
3
]
=
Distance
;
rarCopyString
(
2
,
Distance
);
continue
;
}
}
rarUpdateProgress
();
}
function
rarUpdateProgress
()
{
var
change
=
rBuffer
.
ptr
-
currentBytesUnarchivedInFile
;
currentBytesUnarchivedInFile
=
rBuffer
.
ptr
;
currentBytesUnarchived
+=
change
;
postProgress
();
}
var
rNC20
=
298
,
rDC20
=
48
,
rRC20
=
28
,
rBC20
=
19
,
rMC20
=
257
;
var
UnpOldTable20
=
new
Array
(
rMC20
*
4
);
function
rarReadTables20
(
bstream
)
{
var
BitLength
=
new
Array
(
rBC20
);
var
Table
=
new
Array
(
rMC20
*
4
);
var
TableSize
,
N
,
I
;
var
i
;
bstream
.
readBits
(
1
);
if
(
!
bstream
.
readBits
(
1
))
{
for
(
i
=
UnpOldTable20
.
length
;
i
--
;)
UnpOldTable20
[
i
]
=
0
;
}
TableSize
=
rNC20
+
rDC20
+
rRC20
;
for
(
I
=
0
;
I
<
rBC20
;
I
++
)
{
BitLength
[
I
]
=
bstream
.
readBits
(
4
);
}
rarMakeDecodeTables
(
BitLength
,
0
,
BD
,
rBC20
);
I
=
0
;
while
(
I
<
TableSize
)
{
var
num
=
rarDecodeNumber
(
bstream
,
BD
);
if
(
num
<
16
)
{
Table
[
I
]
=
num
+
UnpOldTable20
[
I
]
&
0xf
;
I
++
;
}
else
if
(
num
===
16
)
{
N
=
bstream
.
readBits
(
2
)
+
3
;
while
(
N
--
>
0
&&
I
<
TableSize
)
{
Table
[
I
]
=
Table
[
I
-
1
];
I
++
;
}
}
else
{
if
(
num
===
17
)
{
N
=
bstream
.
readBits
(
3
)
+
3
;
}
else
{
N
=
bstream
.
readBits
(
7
)
+
11
;
}
while
(
N
--
>
0
&&
I
<
TableSize
)
{
Table
[
I
++
]
=
0
;
}
}
}
rarMakeDecodeTables
(
Table
,
0
,
LD
,
rNC20
);
rarMakeDecodeTables
(
Table
,
rNC20
,
DD
,
rDC20
);
rarMakeDecodeTables
(
Table
,
rNC20
+
rDC20
,
RD
,
rRC20
);
for
(
i
=
UnpOldTable20
.
length
;
i
--
;)
UnpOldTable20
[
i
]
=
Table
[
i
];
}
function
Unpack29
(
bstream
)
{
// lazy initialize rDDecode and rDBits
var
DDecode
=
new
Array
(
rDC
);
var
DBits
=
new
Array
(
rDC
);
var
Distance
=
0
;
var
Length
=
0
;
var
Dist
=
0
,
BitLength
=
0
,
Slot
=
0
;
var
I
;
for
(
I
=
0
;
I
<
rDBitLengthCounts
.
length
;
I
++
,
BitLength
++
)
{
for
(
var
J
=
0
;
J
<
rDBitLengthCounts
[
I
];
J
++
,
Slot
++
,
Dist
+=
(
1
<<
BitLength
))
{
DDecode
[
Slot
]
=
Dist
;
DBits
[
Slot
]
=
BitLength
;
}
}
var
Bits
;
//tablesRead = false;
rOldDist
=
[
0
,
0
,
0
,
0
];
lastDist
=
0
;
lastLength
=
0
;
var
i
;
for
(
i
=
UnpOldTable
.
length
;
i
--
;)
UnpOldTable
[
i
]
=
0
;
// read in Huffman tables
rarReadTables
(
bstream
);
while
(
true
)
{
var
num
=
rarDecodeNumber
(
bstream
,
LD
);
if
(
num
<
256
)
{
rBuffer
.
insertByte
(
num
);
continue
;
}
if
(
num
>=
271
)
{
Length
=
rLDecode
[
num
-=
271
]
+
3
;
if
((
Bits
=
rLBits
[
num
])
>
0
)
{
Length
+=
bstream
.
readBits
(
Bits
);
}
var
DistNumber
=
rarDecodeNumber
(
bstream
,
DD
);
Distance
=
DDecode
[
DistNumber
]
+
1
;
if
((
Bits
=
DBits
[
DistNumber
])
>
0
)
{
if
(
DistNumber
>
9
)
{
if
(
Bits
>
4
)
{
Distance
+=
((
bstream
.
getBits
()
>>>
(
20
-
Bits
))
<<
4
);
bstream
.
readBits
(
Bits
-
4
);
//todo: check this
}
if
(
lowDistRepCount
>
0
)
{
lowDistRepCount
--
;
Distance
+=
prevLowDist
;
}
else
{
var
LowDist
=
rarDecodeNumber
(
bstream
,
LDD
);
if
(
LowDist
===
16
)
{
lowDistRepCount
=
rLowDistRepCount
-
1
;
Distance
+=
prevLowDist
;
}
else
{
Distance
+=
LowDist
;
prevLowDist
=
LowDist
;
}
}
}
else
{
Distance
+=
bstream
.
readBits
(
Bits
);
}
}
if
(
Distance
>=
0x2000
)
{
Length
++
;
if
(
Distance
>=
0x40000
)
{
Length
++
;
}
}
rarInsertOldDist
(
Distance
);
rarInsertLastMatch
(
Length
,
Distance
);
rarCopyString
(
Length
,
Distance
);
continue
;
}
if
(
num
===
256
)
{
if
(
!
rarReadEndOfBlock
(
bstream
))
break
;
continue
;
}
if
(
num
===
257
)
{
//console.log("READVMCODE");
if
(
!
rarReadVMCode
(
bstream
))
break
;
continue
;
}
if
(
num
===
258
)
{
if
(
lastLength
!=
0
)
{
rarCopyString
(
lastLength
,
lastDist
);
}
continue
;
}
if
(
num
<
263
)
{
var
DistNum
=
num
-
259
;
Distance
=
rOldDist
[
DistNum
];
for
(
var
I
=
DistNum
;
I
>
0
;
I
--
)
{
rOldDist
[
I
]
=
rOldDist
[
I
-
1
];
}
rOldDist
[
0
]
=
Distance
;
var
LengthNumber
=
rarDecodeNumber
(
bstream
,
RD
);
Length
=
rLDecode
[
LengthNumber
]
+
2
;
if
((
Bits
=
rLBits
[
LengthNumber
])
>
0
)
{
Length
+=
bstream
.
readBits
(
Bits
);
}
rarInsertLastMatch
(
Length
,
Distance
);
rarCopyString
(
Length
,
Distance
);
continue
;
}
if
(
num
<
272
)
{
Distance
=
rSDDecode
[
num
-=
263
]
+
1
;
if
((
Bits
=
rSDBits
[
num
])
>
0
)
{
Distance
+=
bstream
.
readBits
(
Bits
);
}
rarInsertOldDist
(
Distance
);
rarInsertLastMatch
(
2
,
Distance
);
rarCopyString
(
2
,
Distance
);
continue
;
}
}
rarUpdateProgress
();
}
function
rarReadEndOfBlock
(
bstream
)
{
rarUpdateProgress
();
var
NewTable
=
false
,
NewFile
=
false
;
if
(
bstream
.
readBits
(
1
))
{
NewTable
=
true
;
}
else
{
NewFile
=
true
;
NewTable
=
!!
bstream
.
readBits
(
1
);
}
//tablesRead = !NewTable;
return
!
(
NewFile
||
NewTable
&&
!
rarReadTables
(
bstream
));
}
function
rarReadVMCode
(
bstream
)
{
var
FirstByte
=
bstream
.
readBits
(
8
);
var
Length
=
(
FirstByte
&
7
)
+
1
;
if
(
Length
===
7
)
{
Length
=
bstream
.
readBits
(
8
)
+
7
;
}
else
if
(
Length
===
8
)
{
Length
=
bstream
.
readBits
(
16
);
}
var
vmCode
=
[];
for
(
var
I
=
0
;
I
<
Length
;
I
++
)
{
//do something here with cheking readbuf
vmCode
.
push
(
bstream
.
readBits
(
8
));
}
return
RarAddVMCode
(
FirstByte
,
vmCode
,
Length
);
}
function
RarAddVMCode
(
firstByte
,
vmCode
,
length
)
{
//console.log(vmCode);
if
(
vmCode
.
length
>
0
)
{
info
(
"Error! RarVM not supported yet!"
);
}
return
true
;
}
function
rarInsertLastMatch
(
length
,
distance
)
{
lastDist
=
distance
;
lastLength
=
length
;
}
function
rarInsertOldDist
(
distance
)
{
rOldDist
.
splice
(
3
,
1
);
rOldDist
.
splice
(
0
,
0
,
distance
);
}
//this is the real function, the other one is for debugging
function
rarCopyString
(
length
,
distance
)
{
var
destPtr
=
rBuffer
.
ptr
-
distance
;
if
(
destPtr
<
0
)
{
var
l
=
rOldBuffers
.
length
;
while
(
destPtr
<
0
)
{
destPtr
=
rOldBuffers
[
--
l
].
data
.
length
+
destPtr
;
}
//TODO: lets hope that it never needs to read beyond file boundaries
while
(
length
--
)
rBuffer
.
insertByte
(
rOldBuffers
[
l
].
data
[
destPtr
++
]);
}
if
(
length
>
distance
)
{
while
(
length
--
)
rBuffer
.
insertByte
(
rBuffer
.
data
[
destPtr
++
]);
}
else
{
rBuffer
.
insertBytes
(
rBuffer
.
data
.
subarray
(
destPtr
,
destPtr
+
length
));
}
}
var
rOldBuffers
=
[];
// v must be a valid RarVolume
function
unpack
(
v
)
{
// TODO: implement what happens when unpVer is < 15
var
Ver
=
v
.
header
.
unpVer
<=
15
?
15
:
v
.
header
.
unpVer
,
Solid
=
v
.
header
.
LHD_SOLID
,
bstream
=
new
bitjs
.
io
.
BitStream
(
v
.
fileData
.
buffer
,
true
/* rtl */
,
v
.
fileData
.
byteOffset
,
v
.
fileData
.
byteLength
);
rBuffer
=
new
bitjs
.
io
.
ByteBuffer
(
v
.
header
.
unpackedSize
);
info
(
"Unpacking "
+
v
.
filename
+
" RAR v"
+
Ver
);
switch
(
Ver
)
{
case
15
:
// rar 1.5 compression
Unpack15
();
//(bstream, Solid);
break
;
case
20
:
// rar 2.x compression
case
26
:
// files larger than 2GB
Unpack20
(
bstream
);
//, Solid);
break
;
case
29
:
// rar 3.x compression
case
36
:
// alternative hash
Unpack29
(
bstream
);
break
;
}
// switch(method)
rOldBuffers
.
push
(
rBuffer
);
//TODO: clear these old buffers when there's over 4MB of history
return
rBuffer
.
data
;
}
// bstream is a bit stream
var
RarLocalFile
=
function
(
bstream
)
{
this
.
header
=
new
RarVolumeHeader
(
bstream
);
this
.
filename
=
this
.
header
.
filename
;
if
(
this
.
header
.
headType
!=
FILE_HEAD
&&
this
.
header
.
headType
!=
ENDARC_HEAD
)
{
this
.
isValid
=
false
;
info
(
"Error! RAR Volume did not include a FILE_HEAD header "
);
}
else
{
// read in the compressed data
this
.
fileData
=
null
;
if
(
this
.
header
.
packSize
>
0
)
{
this
.
fileData
=
bstream
.
readBytes
(
this
.
header
.
packSize
);
this
.
isValid
=
true
;
}
}
};
RarLocalFile
.
prototype
.
unrar
=
function
()
{
if
(
!
this
.
header
.
flags
.
LHD_SPLIT_BEFORE
)
{
// unstore file
if
(
this
.
header
.
method
===
0x30
)
{
info
(
"Unstore "
+
this
.
filename
);
this
.
isValid
=
true
;
currentBytesUnarchivedInFile
+=
this
.
fileData
.
length
;
currentBytesUnarchived
+=
this
.
fileData
.
length
;
// Create a new buffer and copy it over.
var
len
=
this
.
header
.
packSize
;
var
newBuffer
=
new
bitjs
.
io
.
ByteBuffer
(
len
);
newBuffer
.
insertBytes
(
this
.
fileData
);
this
.
fileData
=
newBuffer
.
data
;
}
else
{
this
.
isValid
=
true
;
this
.
fileData
=
unpack
(
this
);
}
}
};
var
unrar
=
function
(
arrayBuffer
)
{
currentFilename
=
""
;
currentFileNumber
=
0
;
currentBytesUnarchivedInFile
=
0
;
currentBytesUnarchived
=
0
;
totalUncompressedBytesInArchive
=
0
;
totalFilesInArchive
=
0
;
postMessage
(
new
bitjs
.
archive
.
UnarchiveStartEvent
());
var
bstream
=
new
bitjs
.
io
.
BitStream
(
arrayBuffer
,
false
/* rtl */
);
var
header
=
new
RarVolumeHeader
(
bstream
);
if
(
header
.
crc
===
0x6152
&&
header
.
headType
===
0x72
&&
header
.
flags
.
value
===
0x1A21
&&
header
.
headSize
===
7
)
{
info
(
"Found RAR signature"
);
var
mhead
=
new
RarVolumeHeader
(
bstream
);
if
(
mhead
.
headType
!=
MAIN_HEAD
)
{
info
(
"Error! RAR did not include a MAIN_HEAD header"
);
}
else
{
var
localFiles
=
[];
var
localFile
=
null
;
do
{
try
{
localFile
=
new
RarLocalFile
(
bstream
);
info
(
"RAR localFile isValid="
+
localFile
.
isValid
+
", volume packSize="
+
localFile
.
header
.
packSize
);
if
(
localFile
&&
localFile
.
isValid
&&
localFile
.
header
.
packSize
>
0
)
{
totalUncompressedBytesInArchive
+=
localFile
.
header
.
unpackedSize
;
localFiles
.
push
(
localFile
);
}
else
if
(
localFile
.
header
.
packSize
===
0
&&
localFile
.
header
.
unpackedSize
===
0
)
{
localFile
.
isValid
=
true
;
}
}
catch
(
err
)
{
break
;
}
//info("bstream" + bstream.bytePtr+"/"+bstream.bytes.length);
}
while
(
localFile
.
isValid
);
totalFilesInArchive
=
localFiles
.
length
;
// now we have all information but things are unpacked
// TODO: unpack
localFiles
=
localFiles
.
sort
(
function
(
a
,
b
)
{
var
aname
=
a
.
filename
.
toLowerCase
();
var
bname
=
b
.
filename
.
toLowerCase
();
return
aname
>
bname
?
1
:
-
1
;
});
info
(
localFiles
.
map
(
function
(
a
)
{
return
a
.
filename
;}).
join
(
", "
));
for
(
var
i
=
0
;
i
<
localFiles
.
length
;
++
i
)
{
var
localfile
=
localFiles
[
i
];
// update progress
currentFilename
=
localfile
.
header
.
filename
;
currentBytesUnarchivedInFile
=
0
;
// actually do the unzipping
localfile
.
unrar
();
if
(
localfile
.
isValid
)
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveExtractEvent
(
localfile
));
postProgress
();
}
}
postProgress
();
}
}
else
{
err
(
"Invalid RAR file"
);
}
postMessage
(
new
bitjs
.
archive
.
UnarchiveFinishEvent
());
};
// event.data.file has the ArrayBuffer.
onmessage
=
function
(
event
)
{
var
ab
=
event
.
data
.
file
;
unrar
(
ab
,
true
);
};
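The onmessage handler above is the worker-side half of a simple protocol: the page posts an object whose file property is the ArrayBuffer, and the worker answers with the bitjs.archive.Unarchive*Event objects (start, progress, extract, finish, info, error) built by the helpers at the top of this file. Roughly what the calling side looks like, as a sketch only: the real wiring lives in kthoom.js/archive.js and may differ, and the worker path and someArrayBuffer below are placeholders.

// Caller-side sketch, illustration only.
var worker = new Worker("unrar.js");            // placeholder path
worker.onmessage = function (e) {
    var evt = e.data;   // structured clone of a bitjs.archive.Unarchive*Event
    // Dispatch on evt.type here: progress events carry the byte counters set
    // above, extract events carry the unarchived file, finish ends the run.
};
worker.postMessage({ file: someArrayBuffer });  // matches event.data.file above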
cps/static/js/unzip.js
deleted 100644 → 0
/**
* unzip.js
*
* Copyright(c) 2011 Google Inc.
* Copyright(c) 2011 antimatter15
*
* Reference Documentation:
*
* ZIP format: http://www.pkware.com/documents/casestudies/APPNOTE.TXT
* DEFLATE format: http://tools.ietf.org/html/rfc1951
*/
/* global bitjs, importScripts, Uint8Array*/
// This file expects to be invoked as a Worker (see onmessage below).
importScripts
(
"io.js"
);
importScripts
(
"archive.js"
);
// Progress variables.
var
currentFilename
=
""
;
var
currentFileNumber
=
0
;
var
currentBytesUnarchivedInFile
=
0
;
var
currentBytesUnarchived
=
0
;
var
totalUncompressedBytesInArchive
=
0
;
var
totalFilesInArchive
=
0
;
// Helper functions.
var
info
=
function
(
str
)
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveInfoEvent
(
str
));
};
var
err
=
function
(
str
)
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveErrorEvent
(
str
));
};
var
postProgress
=
function
()
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveProgressEvent
(
currentFilename
,
currentFileNumber
,
currentBytesUnarchivedInFile
,
currentBytesUnarchived
,
totalUncompressedBytesInArchive
,
totalFilesInArchive
));
};
var
zLocalFileHeaderSignature
=
0x04034b50
;
var
zArchiveExtraDataSignature
=
0x08064b50
;
var
zCentralFileHeaderSignature
=
0x02014b50
;
var
zDigitalSignatureSignature
=
0x05054b50
;
// takes a ByteStream and parses out the local file information
var
ZipLocalFile
=
function
(
bstream
)
{
if
(
typeof
bstream
!=
typeof
{}
||
!
bstream
.
readNumber
||
typeof
bstream
.
readNumber
!=
typeof
function
()
{})
{
return
null
;
}
bstream
.
readNumber
(
4
);
// swallow signature
this
.
version
=
bstream
.
readNumber
(
2
);
this
.
generalPurpose
=
bstream
.
readNumber
(
2
);
this
.
compressionMethod
=
bstream
.
readNumber
(
2
);
this
.
lastModFileTime
=
bstream
.
readNumber
(
2
);
this
.
lastModFileDate
=
bstream
.
readNumber
(
2
);
this
.
crc32
=
bstream
.
readNumber
(
4
);
this
.
compressedSize
=
bstream
.
readNumber
(
4
);
this
.
uncompressedSize
=
bstream
.
readNumber
(
4
);
this
.
fileNameLength
=
bstream
.
readNumber
(
2
);
this
.
extraFieldLength
=
bstream
.
readNumber
(
2
);
this
.
filename
=
null
;
if
(
this
.
fileNameLength
>
0
)
{
this
.
filename
=
bstream
.
readString
(
this
.
fileNameLength
);
}
info
(
"Zip Local File Header:"
);
info
(
" version="
+
this
.
version
);
info
(
" general purpose="
+
this
.
generalPurpose
);
info
(
" compression method="
+
this
.
compressionMethod
);
info
(
" last mod file time="
+
this
.
lastModFileTime
);
info
(
" last mod file date="
+
this
.
lastModFileDate
);
info
(
" crc32="
+
this
.
crc32
);
info
(
" compressed size="
+
this
.
compressedSize
);
info
(
" uncompressed size="
+
this
.
uncompressedSize
);
info
(
" file name length="
+
this
.
fileNameLength
);
info
(
" extra field length="
+
this
.
extraFieldLength
);
info
(
" filename = '"
+
this
.
filename
+
"'"
);
this
.
extraField
=
null
;
if
(
this
.
extraFieldLength
>
0
)
{
this
.
extraField
=
bstream
.
readString
(
this
.
extraFieldLength
);
info
(
" extra field="
+
this
.
extraField
);
}
// read in the compressed data
this
.
fileData
=
null
;
if
(
this
.
compressedSize
>
0
)
{
this
.
fileData
=
new
Uint8Array
(
bstream
.
bytes
.
buffer
,
bstream
.
ptr
,
this
.
compressedSize
);
bstream
.
ptr
+=
this
.
compressedSize
;
}
// TODO: deal with data descriptor if present (we currently assume no data descriptor!)
// "This descriptor exists only if bit 3 of the general purpose bit flag is set"
// But how do you figure out how big the file data is if you don't know the compressedSize
// from the header?!?
if
((
this
.
generalPurpose
&
bitjs
.
BIT
[
3
])
!=
0
)
{
this
.
crc32
=
bstream
.
readNumber
(
4
);
this
.
compressedSize
=
bstream
.
readNumber
(
4
);
this
.
uncompressedSize
=
bstream
.
readNumber
(
4
);
}
};
// determine what kind of compressed data we have and decompress
ZipLocalFile
.
prototype
.
unzip
=
function
()
{
// Zip Version 1.0, no compression (store only)
if
(
this
.
compressionMethod
==
0
)
{
info
(
"ZIP v"
+
this
.
version
+
", store only: "
+
this
.
filename
+
" ("
+
this
.
compressedSize
+
" bytes)"
);
currentBytesUnarchivedInFile
=
this
.
compressedSize
;
currentBytesUnarchived
+=
this
.
compressedSize
;
this
.
fileData
=
zeroCompression
(
this
.
fileData
,
this
.
uncompressedSize
);
}
// version == 20, compression method == 8 (DEFLATE)
else
if
(
this
.
compressionMethod
==
8
)
{
info
(
"ZIP v2.0, DEFLATE: "
+
this
.
filename
+
" ("
+
this
.
compressedSize
+
" bytes)"
);
this
.
fileData
=
inflate
(
this
.
fileData
,
this
.
uncompressedSize
);
}
else
{
err
(
"UNSUPPORTED VERSION/FORMAT: ZIP v"
+
this
.
version
+
", compression method="
+
this
.
compressionMethod
+
": "
+
this
.
filename
+
" ("
+
this
.
compressedSize
+
" bytes)"
);
this
.
fileData
=
null
;
}
};
// Takes an ArrayBuffer of a zip file in
// returns null on error
// returns an array of DecompressedFile objects on success
var
unzip
=
function
(
arrayBuffer
)
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveStartEvent
());
currentFilename
=
""
;
currentFileNumber
=
0
;
currentBytesUnarchivedInFile
=
0
;
currentBytesUnarchived
=
0
;
totalUncompressedBytesInArchive
=
0
;
totalFilesInArchive
=
0
;
currentBytesUnarchived
=
0
;
var
bstream
=
new
bitjs
.
io
.
ByteStream
(
arrayBuffer
);
// detect local file header signature or return null
if
(
bstream
.
peekNumber
(
4
)
==
zLocalFileHeaderSignature
)
{
var
localFiles
=
[];
// loop until we don't see any more local files
while
(
bstream
.
peekNumber
(
4
)
==
zLocalFileHeaderSignature
)
{
var
oneLocalFile
=
new
ZipLocalFile
(
bstream
);
// this should strip out directories/folders
if
(
oneLocalFile
&&
oneLocalFile
.
uncompressedSize
>
0
&&
oneLocalFile
.
fileData
)
{
localFiles
.
push
(
oneLocalFile
);
totalUncompressedBytesInArchive
+=
oneLocalFile
.
uncompressedSize
;
}
}
totalFilesInArchive
=
localFiles
.
length
;
// got all local files, now sort them
localFiles
.
sort
(
function
(
a
,
b
)
{
var
aname
=
a
.
filename
.
toLowerCase
();
var
bname
=
b
.
filename
.
toLowerCase
();
return
aname
>
bname
?
1
:
-
1
;
});
// archive extra data record
if
(
bstream
.
peekNumber
(
4
)
==
zArchiveExtraDataSignature
)
{
info
(
" Found an Archive Extra Data Signature"
);
// skipping this record for now
bstream
.
readNumber
(
4
);
var
archiveExtraFieldLength
=
bstream
.
readNumber
(
4
);
bstream
.
readString
(
archiveExtraFieldLength
);
}
// central directory structure
// TODO: handle the rest of the structures (Zip64 stuff)
if
(
bstream
.
peekNumber
(
4
)
==
zCentralFileHeaderSignature
)
{
info
(
" Found a Central File Header"
);
// read all file headers
while
(
bstream
.
peekNumber
(
4
)
==
zCentralFileHeaderSignature
)
{
bstream
.
readNumber
(
4
);
// signature
bstream
.
readNumber
(
2
);
// version made by
bstream
.
readNumber
(
2
);
// version needed to extract
bstream
.
readNumber
(
2
);
// general purpose bit flag
bstream
.
readNumber
(
2
);
// compression method
bstream
.
readNumber
(
2
);
// last mod file time
bstream
.
readNumber
(
2
);
// last mod file date
bstream
.
readNumber
(
4
);
// crc32
bstream
.
readNumber
(
4
);
// compressed size
bstream
.
readNumber
(
4
);
// uncompressed size
var
fileNameLength
=
bstream
.
readNumber
(
2
);
// file name length
var
extraFieldLength
=
bstream
.
readNumber
(
2
);
// extra field length
var
fileCommentLength
=
bstream
.
readNumber
(
2
);
// file comment length
bstream
.
readNumber
(
2
);
// disk number start
bstream
.
readNumber
(
2
);
// internal file attributes
bstream
.
readNumber
(
4
);
// external file attributes
bstream
.
readNumber
(
4
);
// relative offset of local header
bstream
.
readString
(
fileNameLength
);
// file name
bstream
.
readString
(
extraFieldLength
);
// extra field
bstream
.
readString
(
fileCommentLength
);
// file comment
}
}
// digital signature
if
(
bstream
.
peekNumber
(
4
)
==
zDigitalSignatureSignature
)
{
info
(
" Found a Digital Signature"
);
bstream
.
readNumber
(
4
);
var
sizeOfSignature
=
bstream
.
readNumber
(
2
);
bstream
.
readString
(
sizeOfSignature
);
// digital signature data
}
// report # files and total length
if
(
localFiles
.
length
>
0
)
{
postProgress
();
}
// now do the unzipping of each file
for
(
var
i
=
0
;
i
<
localFiles
.
length
;
++
i
)
{
var
localfile
=
localFiles
[
i
];
// update progress
currentFilename
=
localfile
.
filename
;
currentFileNumber
=
i
;
currentBytesUnarchivedInFile
=
0
;
// actually do the unzipping
localfile
.
unzip
();
if
(
localfile
.
fileData
!=
null
)
{
postMessage
(
new
bitjs
.
archive
.
UnarchiveExtractEvent
(
localfile
));
postProgress
();
}
}
postProgress
();
postMessage
(
new
bitjs
.
archive
.
UnarchiveFinishEvent
());
}
};
// returns a table of Huffman codes
// each entry's index is its code and its value is a JavaScript object
// containing {length: 6, symbol: X}
function
getHuffmanCodes
(
bitLengths
)
{
// ensure bitLengths is an array containing at least one element
if
(
typeof
bitLengths
!=
typeof
[]
||
bitLengths
.
length
<
1
)
{
err
(
"Error! getHuffmanCodes() called with an invalid array"
);
return
null
;
}
// Reference: http://tools.ietf.org/html/rfc1951#page-8
var
numLengths
=
bitLengths
.
length
,
blCount
=
[],
MAX_BITS
=
1
;
// Step 1: count up how many codes of each length we have
for
(
var
i
=
0
;
i
<
numLengths
;
++
i
)
{
var
length
=
bitLengths
[
i
];
// test to ensure each bit length is a positive, non-zero number
if
(
typeof
length
!=
typeof
1
||
length
<
0
)
{
err
(
"bitLengths contained an invalid number in getHuffmanCodes(): "
+
length
+
" of type "
+
(
typeof
length
));
return
null
;
}
// increment the appropriate bitlength count
if
(
blCount
[
length
]
==
undefined
)
blCount
[
length
]
=
0
;
// a length of zero means this symbol is not participating in the huffman coding
if
(
length
>
0
)
blCount
[
length
]
++
;
if
(
length
>
MAX_BITS
)
MAX_BITS
=
length
;
}
// Step 2: Find the numerical value of the smallest code for each code length
var
nextCode
=
[],
code
=
0
;
for
(
var
bits
=
1
;
bits
<=
MAX_BITS
;
++
bits
)
{
var
length
=
bits
-
1
;
// ensure undefined lengths are zero
if
(
blCount
[
length
]
==
undefined
)
blCount
[
length
]
=
0
;
code
=
(
code
+
blCount
[
bits
-
1
])
<<
1
;
nextCode
[
bits
]
=
code
;
}
// Step 3: Assign numerical values to all codes
var
table
=
{},
tableLength
=
0
;
for
(
var
n
=
0
;
n
<
numLengths
;
++
n
)
{
var
len
=
bitLengths
[
n
];
if
(
len
!=
0
)
{
table
[
nextCode
[
len
]]
=
{
length
:
len
,
symbol
:
n
};
//, bitstring: binaryValueToString(nextCode [len],len) };
tableLength
++
;
nextCode
[
len
]
++
;
}
}
table
.
maxLength
=
tableLength
;
return
table
;
}
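getHuffmanCodes() above is the canonical Huffman construction from RFC 1951 section 3.2.2: count the codes per bit length, derive the smallest code for each length, then hand out codes in symbol order. A tiny worked example using the RFC's own eight-symbol alphabet (illustration only, not part of this commit):

// Illustration only: RFC 1951's example alphabet A..H with bit lengths
// (3, 3, 3, 3, 3, 2, 4, 4) produces the canonical codes
//   F=00, A=010, B=011, C=100, D=101, E=110, G=1110, H=1111
var table = getHuffmanCodes([3, 3, 3, 3, 3, 2, 4, 4]);
console.log(table[2]);          // { length: 3, symbol: 0 }  -- code 0b010 is "A"
console.log(table[0]);          // { length: 2, symbol: 5 }  -- code 0b00  is "F"
console.log(table.maxLength);   // 8 (entry count; decodeSymbol uses it as a guard)

decodeSymbol() then reads one bit at a time, appending to code, until the pair (code, length) matches an entry in this table.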
/*
The Huffman codes for the two alphabets are fixed, and are not
represented explicitly in the data. The Huffman code lengths
for the literal/length alphabet are:
Lit Value Bits Codes
--------- ---- -----
0 - 143 8 00110000 through
10111111
144 - 255 9 110010000 through
111111111
256 - 279 7 0000000 through
0010111
280 - 287 8 11000000 through
11000111
*/
// fixed Huffman codes go from 7-9 bits, so we need an array whose index can hold up to 9 bits
var
fixedHCtoLiteral
=
null
;
var
fixedHCtoDistance
=
null
;
function
getFixedLiteralTable
()
{
// create once
if
(
!
fixedHCtoLiteral
)
{
var
bitlengths
=
new
Array
(
288
);
var
i
;
for
(
i
=
0
;
i
<=
143
;
++
i
)
bitlengths
[
i
]
=
8
;
for
(
i
=
144
;
i
<=
255
;
++
i
)
bitlengths
[
i
]
=
9
;
for
(
i
=
256
;
i
<=
279
;
++
i
)
bitlengths
[
i
]
=
7
;
for
(
i
=
280
;
i
<=
287
;
++
i
)
bitlengths
[
i
]
=
8
;
// get huffman code table
fixedHCtoLiteral
=
getHuffmanCodes
(
bitlengths
);
}
return
fixedHCtoLiteral
;
}
function
getFixedDistanceTable
()
{
// create once
if
(
!
fixedHCtoDistance
)
{
var
bitlengths
=
new
Array
(
32
);
for
(
var
i
=
0
;
i
<
32
;
++
i
)
{
bitlengths
[
i
]
=
5
;
}
// get huffman code table
fixedHCtoDistance
=
getHuffmanCodes
(
bitlengths
);
}
return
fixedHCtoDistance
;
}
// extract one bit at a time until we find a matching Huffman Code
// then return that symbol
function
decodeSymbol
(
bstream
,
hcTable
)
{
var
code
=
0
,
len
=
0
;
// loop until we match
for
(;;)
{
// read in next bit
var
bit
=
bstream
.
readBits
(
1
);
code
=
(
code
<<
1
)
|
bit
;
++
len
;
// check against Huffman Code table and break if found
if
(
hcTable
.
hasOwnProperty
(
code
)
&&
hcTable
[
code
].
length
==
len
)
{
break
;
}
if
(
len
>
hcTable
.
maxLength
)
{
err
(
"Bit stream out of sync, didn't find a Huffman Code, length was "
+
len
+
" and table only max code length of "
+
hcTable
.
maxLength
);
break
;
}
}
return
hcTable
[
code
].
symbol
;
}
var
CodeLengthCodeOrder
=
[
16
,
17
,
18
,
0
,
8
,
7
,
9
,
6
,
10
,
5
,
11
,
4
,
12
,
3
,
13
,
2
,
14
,
1
,
15
];
/*
Extra Extra Extra
Code Bits Length(s) Code Bits Lengths Code Bits Length(s)
---- ---- ------ ---- ---- ------- ---- ---- -------
257 0 3 267 1 15,16 277 4 67-82
258 0 4 268 1 17,18 278 4 83-98
259 0 5 269 2 19-22 279 4 99-114
260 0 6 270 2 23-26 280 4 115-130
261 0 7 271 2 27-30 281 5 131-162
262 0 8 272 2 31-34 282 5 163-194
263 0 9 273 3 35-42 283 5 195-226
264 0 10 274 3 43-50 284 5 227-257
265 1 11,12 275 3 51-58 285 0 258
266 1 13,14 276 3 59-66
*/
var
LengthLookupTable
=
[
[
0
,
3
],
[
0
,
4
],
[
0
,
5
],
[
0
,
6
],
[
0
,
7
],
[
0
,
8
],
[
0
,
9
],
[
0
,
10
],
[
1
,
11
],
[
1
,
13
],
[
1
,
15
],
[
1
,
17
],
[
2
,
19
],
[
2
,
23
],
[
2
,
27
],
[
2
,
31
],
[
3
,
35
],
[
3
,
43
],
[
3
,
51
],
[
3
,
59
],
[
4
,
67
],
[
4
,
83
],
[
4
,
99
],
[
4
,
115
],
[
5
,
131
],
[
5
,
163
],
[
5
,
195
],
[
5
,
227
],
[
0
,
258
]
];
/*
Extra Extra Extra
Code Bits Dist Code Bits Dist Code Bits Distance
---- ---- ---- ---- ---- ------ ---- ---- --------
0 0 1 10 4 33-48 20 9 1025-1536
1 0 2 11 4 49-64 21 9 1537-2048
2 0 3 12 5 65-96 22 10 2049-3072
3 0 4 13 5 97-128 23 10 3073-4096
4 1 5,6 14 6 129-192 24 11 4097-6144
5 1 7,8 15 6 193-256 25 11 6145-8192
6 2 9-12 16 7 257-384 26 12 8193-12288
7 2 13-16 17 7 385-512 27 12 12289-16384
8 3 17-24 18 8 513-768 28 13 16385-24576
9 3 25-32 19 8 769-1024 29 13 24577-32768
*/
var
DistLookupTable
=
[
[
0
,
1
],
[
0
,
2
],
[
0
,
3
],
[
0
,
4
],
[
1
,
5
],
[
1
,
7
],
[
2
,
9
],
[
2
,
13
],
[
3
,
17
],
[
3
,
25
],
[
4
,
33
],
[
4
,
49
],
[
5
,
65
],
[
5
,
97
],
[
6
,
129
],
[
6
,
193
],
[
7
,
257
],
[
7
,
385
],
[
8
,
513
],
[
8
,
769
],
[
9
,
1025
],
[
9
,
1537
],
[
10
,
2049
],
[
10
,
3073
],
[
11
,
4097
],
[
11
,
6145
],
[
12
,
8193
],
[
12
,
12289
],
[
13
,
16385
],
[
13
,
24577
]
];
function
inflateBlockData
(
bstream
,
hcLiteralTable
,
hcDistanceTable
,
buffer
)
{
/*
loop (until end of block code recognized)
decode literal/length value from input stream
if value < 256
copy value (literal byte) to output stream
otherwise
if value = end of block (256)
break from loop
otherwise (value = 257..285)
decode distance from input stream
move backwards distance bytes in the output
stream, and copy length bytes from this
position to the output stream.
*/
var
blockSize
=
0
;
for
(;;)
{
var
symbol
=
decodeSymbol
(
bstream
,
hcLiteralTable
);
if
(
symbol
<
256
)
{
// copy literal byte to output
buffer
.
insertByte
(
symbol
);
blockSize
++
;
}
else
{
// end of block reached
if
(
symbol
==
256
)
{
break
;
}
else
{
var
lengthLookup
=
LengthLookupTable
[
symbol
-
257
],
length
=
lengthLookup
[
1
]
+
bstream
.
readBits
(
lengthLookup
[
0
]),
distLookup
=
DistLookupTable
[
decodeSymbol
(
bstream
,
hcDistanceTable
)],
distance
=
distLookup
[
1
]
+
bstream
.
readBits
(
distLookup
[
0
]);
// now apply length and distance appropriately and copy to output
// TODO: check that backward distance < data.length?
// http://tools.ietf.org/html/rfc1951#page-11
// "Note also that the referenced string may overlap the current
// position; for example, if the last 2 bytes decoded have values
// X and Y, a string reference with <length = 5, distance = 2>
// adds X,Y,X,Y,X to the output stream."
//
// loop for each character
var
ch
=
buffer
.
ptr
-
distance
;
blockSize
+=
length
;
if
(
length
>
distance
)
{
var
data
=
buffer
.
data
;
while
(
length
--
)
{
buffer
.
insertByte
(
data
[
ch
++
]);
}
}
else
{
buffer
.
insertBytes
(
buffer
.
data
.
subarray
(
ch
,
ch
+
length
));
}
}
// length-distance pair
}
// length-distance pair or end-of-block
}
// loop until we reach end of block
return
blockSize
;
}
function
zeroCompression
(
compressedData
,
numDecompressedBytes
)
{
var
bstream
=
new
bitjs
.
io
.
BitStream
(
compressedData
.
buffer
,
false
/* rtl */
,
compressedData
.
byteOffset
,
compressedData
.
byteLength
);
var
buffer
=
new
bitjs
.
io
.
ByteBuffer
(
numDecompressedBytes
);
buffer
.
insertBytes
(
bstream
.
readBytes
(
numDecompressedBytes
));
return
buffer
.
data
;
}
// {Uint8Array} compressedData A Uint8Array of the compressed file data.
// compression method 8
// deflate: http://tools.ietf.org/html/rfc1951
function
inflate
(
compressedData
,
numDecompressedBytes
)
{
// Bit stream representing the compressed data.
var
bstream
=
new
bitjs
.
io
.
BitStream
(
compressedData
.
buffer
,
false
/* rtl */
,
compressedData
.
byteOffset
,
compressedData
.
byteLength
);
var
buffer
=
new
bitjs
.
io
.
ByteBuffer
(
numDecompressedBytes
);
var
numBlocks
=
0
,
blockSize
=
0
;
// block format: http://tools.ietf.org/html/rfc1951#page-9
do
{
var
bFinal
=
bstream
.
readBits
(
1
),
bType
=
bstream
.
readBits
(
2
);
blockSize
=
0
;
++
numBlocks
;
// no compression
if
(
bType
==
0
)
{
// skip remaining bits in this byte
while
(
bstream
.
bitPtr
!=
0
)
bstream
.
readBits
(
1
);
var
len
=
bstream
.
readBits
(
16
);
bstream
.
readBits
(
16
);
// TODO: check if nlen is the ones-complement of len?
if
(
len
>
0
)
buffer
.
insertBytes
(
bstream
.
readBytes
(
len
));
blockSize
=
len
;
}
// fixed Huffman codes
else
if
(
bType
==
1
)
{
blockSize
=
inflateBlockData
(
bstream
,
getFixedLiteralTable
(),
getFixedDistanceTable
(),
buffer
);
}
// dynamic Huffman codes
else
if
(
bType
==
2
)
{
var
numLiteralLengthCodes
=
bstream
.
readBits
(
5
)
+
257
;
var
numDistanceCodes
=
bstream
.
readBits
(
5
)
+
1
,
numCodeLengthCodes
=
bstream
.
readBits
(
4
)
+
4
;
// populate the array of code length codes (first de-compaction)
var
codeLengthsCodeLengths
=
[
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
,
0
];
for
(
var
i
=
0
;
i
<
numCodeLengthCodes
;
++
i
)
{
codeLengthsCodeLengths
[
CodeLengthCodeOrder
[
i
]
]
=
bstream
.
readBits
(
3
);
}
// get the Huffman Codes for the code lengths
var
codeLengthsCodes
=
getHuffmanCodes
(
codeLengthsCodeLengths
);
// now follow this mapping
/*
0 - 15: Represent code lengths of 0 - 15
16: Copy the previous code length 3 - 6 times.
The next 2 bits indicate repeat length
(0 = 3, ... , 3 = 6)
Example: Codes 8, 16 (+2 bits 11),
16 (+2 bits 10) will expand to
12 code lengths of 8 (1 + 6 + 5)
17: Repeat a code length of 0 for 3 - 10 times.
(3 bits of length)
18: Repeat a code length of 0 for 11 - 138 times
(7 bits of length)
*/
// to generate the true code lengths of the Huffman Codes for the literal
// and distance tables together
var
literalCodeLengths
=
[];
var
prevCodeLength
=
0
;
while
(
literalCodeLengths
.
length
<
numLiteralLengthCodes
+
numDistanceCodes
)
{
var
symbol
=
decodeSymbol
(
bstream
,
codeLengthsCodes
);
if
(
symbol
<=
15
)
{
literalCodeLengths
.
push
(
symbol
);
prevCodeLength
=
symbol
;
}
else
if
(
symbol
==
16
)
{
var
repeat
=
bstream
.
readBits
(
2
)
+
3
;
while
(
repeat
--
)
{
literalCodeLengths
.
push
(
prevCodeLength
);
}
}
else
if
(
symbol
==
17
)
{
var
repeat1
=
bstream
.
readBits
(
3
)
+
3
;
while
(
repeat1
--
)
{
literalCodeLengths
.
push
(
0
);
}
}
else
if
(
symbol
==
18
)
{
var
repeat2
=
bstream
.
readBits
(
7
)
+
11
;
while
(
repeat2
--
)
{
literalCodeLengths
.
push
(
0
);
}
}
}
// now split the distance code lengths out of the literal code array
var
distanceCodeLengths
=
literalCodeLengths
.
splice
(
numLiteralLengthCodes
,
numDistanceCodes
);
// now generate the true Huffman Code tables using these code lengths
var
hcLiteralTable
=
getHuffmanCodes
(
literalCodeLengths
),
hcDistanceTable
=
getHuffmanCodes
(
distanceCodeLengths
);
blockSize
=
inflateBlockData
(
bstream
,
hcLiteralTable
,
hcDistanceTable
,
buffer
);
}
else
{
// error
err
(
"Error! Encountered deflate block of type 3"
);
return
null
;
}
// update progress
currentBytesUnarchivedInFile
+=
blockSize
;
currentBytesUnarchived
+=
blockSize
;
postProgress
();
}
while
(
bFinal
!=
1
);
// we are done reading blocks if the bFinal bit was set for this block
// return the buffer data bytes
return
buffer
.
data
;
}
// event.data.file has the ArrayBuffer.
onmessage
=
function
(
event
)
{
unzip
(
event
.
data
.
file
,
true
);
};
cps/templates/readcbr.html
@@ -15,7 +15,7 @@
 <script src="{{ url_for('static', filename='js/libs/jquery.min.js') }}"></script>
 <script src="{{ url_for('static', filename='js/libs/screenfull.min.js') }}"></script>
 <script src="{{ url_for('static', filename='js/kthoom.js') }}"></script>
-<script src="{{ url_for('static', filename='js/archive.js') }}"></script>
+<script src="{{ url_for('static', filename='js/archive/archive.js') }}"></script>
 <script>
 var updateArrows = function () {
     if ($('input[name="direction"]:checked').val() === "0") {
...