=> ({\n applyToStack: (clientStack) => {\n clientStack.addRelativeTo(bucketEndpointMiddleware(options), bucketEndpointMiddlewareOptions);\n },\n});\n","import { __extends } from \"tslib\";\nimport { GetObjectOutput, GetObjectRequest } from \"../models/models_0\";\nimport { deserializeAws_restXmlGetObjectCommand, serializeAws_restXmlGetObjectCommand } from \"../protocols/Aws_restXml\";\nimport { getBucketEndpointPlugin } from \"@aws-sdk/middleware-bucket-endpoint\";\nimport { getSerdePlugin } from \"@aws-sdk/middleware-serde\";\nimport { getSsecPlugin } from \"@aws-sdk/middleware-ssec\";\nimport { Command as $Command } from \"@aws-sdk/smithy-client\";\n/**\n * Retrieves objects from Amazon S3. To use GET
, you must have READ\n * access to the object. If you grant READ access to the anonymous user, you can\n * return the object without using an authorization header.\n *\n * An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer\n * file system. You can, however, create a logical hierarchy by using object key names that\n * imply a folder structure. For example, instead of naming an object sample.jpg,\n * you can name it photos/2006/February/sample.jpg.\n *\n * To get an object from such a logical hierarchy, specify the full key name for the object\n * in the GET operation. For a virtual hosted-style request example, if you have\n * the object photos/2006/February/sample.jpg, specify the resource as\n * /photos/2006/February/sample.jpg. For a path-style request example, if you\n * have the object photos/2006/February/sample.jpg in the bucket named\n * examplebucket, specify the resource as\n * /examplebucket/photos/2006/February/sample.jpg. For more information about\n * request types, see HTTP Host Header Bucket Specification.\n *\n * To distribute large files to many people, you can save bandwidth costs by using\n * BitTorrent. For more information, see Amazon S3\n * Torrent. For more information about returning the ACL of an object, see GetObjectAcl.
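As a minimal sketch of the request described above (not part of the original source): fetching the object whose key implies a folder hierarchy, using the public @aws-sdk/client-s3 entry point. The region, bucket name, and key are placeholders; the client chooses virtual-hosted-style or path-style addressing from its configuration.

import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

// Placeholder region and bucket; the key is the full logical name, not a directory path.
const client = new S3Client({ region: "us-east-1" });

async function fetchSample() {
  const response = await client.send(
    new GetObjectCommand({
      Bucket: "examplebucket",
      Key: "photos/2006/February/sample.jpg",
    })
  );
  console.log(response.ContentType, response.ContentLength);
}

fetchSample().catch(console.error);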
\n *\n * If the object you are retrieving is stored in the S3 Glacier or\n * S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or\n * S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a\n * copy using RestoreObject. Otherwise, this operation returns an\n * InvalidObjectStateError error. For information about restoring archived\n * objects, see Restoring Archived\n * Objects.\n *\n * Encryption request headers, like x-amz-server-side-encryption, should not\n * be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS\n * KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your\n * object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
\n * If you encrypt an object by using server-side encryption with customer-provided\n * encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object,\n * you must use the following headers:\n * - x-amz-server-side-encryption-customer-algorithm\n * - x-amz-server-side-encryption-customer-key\n * - x-amz-server-side-encryption-customer-key-MD5\n *\n * For more information about SSE-C, see Server-Side Encryption (Using\n * Customer-Provided Encryption Keys).
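A sketch of the SSE-C case, assuming the usual GetObjectRequest member names (SSECustomerAlgorithm, SSECustomerKey, SSECustomerKeyMD5); the key material below is a placeholder. Based on the getSsecPlugin import in this file, that middleware appears to be what turns these fields into the x-amz-server-side-encryption-customer-* headers, but treat that wiring as an assumption.

import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

// Hypothetical 256-bit customer key; in practice load it from secure storage.
const customerKey = "32-bytes-of-key-material-go-here";

async function getWithSseC() {
  return client.send(
    new GetObjectCommand({
      Bucket: "examplebucket",
      Key: "private/report.pdf",
      SSECustomerAlgorithm: "AES256",
      SSECustomerKey: customerKey,
      // SSECustomerKeyMD5 can be supplied explicitly; the ssec middleware can also
      // derive it from the key (assumption based on the plugin wired into this command).
    })
  );
}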
\n *\n * Assuming you have permission to read object tags (permission for the\n * s3:GetObjectVersionTagging action), the response also returns the\n * x-amz-tagging-count header, which provides the number of tags\n * associated with the object. You can use GetObjectTagging to retrieve\n * the tag set associated with an object.
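A short sketch of reading that tag count and then fetching the tag set with the related GetObjectTagging operation; TagCount is the GetObjectOutput member that surfaces x-amz-tagging-count. Bucket and key are placeholders.

import { S3Client, GetObjectCommand, GetObjectTaggingCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

async function inspectTags() {
  const object = await client.send(
    new GetObjectCommand({ Bucket: "examplebucket", Key: "photos/2006/February/sample.jpg" })
  );
  console.log("tag count:", object.TagCount ?? 0);

  // Retrieve the actual tag set with the related operation.
  const tagging = await client.send(
    new GetObjectTaggingCommand({ Bucket: "examplebucket", Key: "photos/2006/February/sample.jpg" })
  );
  console.log(tagging.TagSet);
}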
\n *\n * Permissions\n *\n * You need the s3:GetObject permission for this operation. For more\n * information, see Specifying Permissions\n * in a Policy. If the object you request does not exist, the error Amazon S3 returns\n * depends on whether you also have the s3:ListBucket permission.\n * - If you have the s3:ListBucket permission on the bucket, Amazon S3 will\n * return an HTTP status code 404 (\"no such key\") error.\n * - If you don’t have the s3:ListBucket permission, Amazon S3 will return an\n * HTTP status code 403 (\"access denied\") error.
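The 404-versus-403 behaviour above is easiest to see in error handling. A hedged sketch: the error names checked below (NoSuchKey, AccessDenied) are what the service typically reports for these two cases, and the status code is read from the SDK's response metadata; treat both as assumptions rather than guaranteed values.

import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

async function getOrExplain(bucket: string, key: string) {
  try {
    return await client.send(new GetObjectCommand({ Bucket: bucket, Key: key }));
  } catch (error: any) {
    const status = error?.$metadata?.httpStatusCode;
    if (status === 404) {
      // Caller has s3:ListBucket and the object simply is not there ("no such key").
      console.warn(`Missing key ${key}:`, error.name);
    } else if (status === 403) {
      // Without s3:ListBucket, S3 reports "access denied" instead of revealing existence.
      console.warn(`Access denied for ${key}:`, error.name);
    }
    throw error;
  }
}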
\n *\n * Versioning\n *\n * By default, the GET operation returns the current version of an object. To return a\n * different version, use the versionId subresource.\n *\n * If the current version of the object is a delete marker, Amazon S3 behaves as if the\n * object was deleted and includes x-amz-delete-marker: true in the\n * response.\n *\n * For more information about versioning, see PutBucketVersioning.
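A sketch of requesting a specific object version via the versionId subresource; the VersionId value below is a placeholder, and bucket/key/region are placeholders as well.

import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

async function getOlderVersion() {
  const response = await client.send(
    new GetObjectCommand({
      Bucket: "examplebucket",
      Key: "photos/2006/February/sample.jpg",
      VersionId: "3HL4kqCxf3vjVBH40Nrjfkd", // placeholder version ID
    })
  );
  // For versioned objects the response echoes the version that was served.
  console.log(response.VersionId);
}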
\n *\n * Overriding Response Header Values\n *\n * There are times when you want to override certain response header values in a GET\n * response. For example, you might override the Content-Disposition response header value in\n * your GET request.\n *\n * You can override values for a set of response headers using the following query\n * parameters. These response header values are sent only on a successful request, that is,\n * when status code 200 OK is returned. The set of headers you can override using these\n * parameters is a subset of the headers that Amazon S3 accepts when you create an object. The\n * response headers that you can override for the GET response are Content-Type,\n * Content-Language, Expires, Cache-Control,\n * Content-Disposition, and Content-Encoding. To override these\n * header values in the GET response, you use the following request parameters.\n *\n * You must sign the request, either using an Authorization header or a presigned URL,\n * when using these parameters. They cannot be used with an unsigned (anonymous)\n * request.
\n * - response-content-type\n * - response-content-language\n * - response-expires\n * - response-cache-control\n * - response-content-disposition\n * - response-content-encoding
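A sketch of the override parameters listed above, using the corresponding GetObjectRequest members (ResponseContentType, ResponseContentDisposition, ResponseCacheControl, ResponseExpires); the request is signed by the configured client, which satisfies the note about anonymous requests. Bucket, key, and values are placeholders.

import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

async function downloadWithOverrides() {
  return client.send(
    new GetObjectCommand({
      Bucket: "examplebucket",
      Key: "reports/q3.csv",
      // Sent as response-content-type / response-content-disposition / ... query parameters.
      ResponseContentType: "text/csv",
      ResponseContentDisposition: "attachment; filename=\"q3.csv\"",
      ResponseCacheControl: "no-cache",
      ResponseExpires: new Date(Date.now() + 60_000),
    })
  );
}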
\n *\n * Additional Considerations about Request Headers\n *\n * If both the If-Match and If-Unmodified-Since headers are present in the request, and the\n * If-Match condition evaluates to true while the If-Unmodified-Since condition evaluates to\n * false, then S3 returns 200 OK and the data requested.\n *\n * If both the If-None-Match and If-Modified-Since headers are present in the request, and the\n * If-None-Match condition evaluates to false while the If-Modified-Since condition evaluates to\n * true, then S3 returns a 304 Not Modified response code.\n *\n * For more information about conditional requests, see RFC 7232.\n *\n * The following operations are related to GetObject:
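Pulling the pieces above together, a hedged end-to-end sketch for Node.js: in that runtime the Body member of the GetObject response is a Readable stream (in browsers it is a ReadableStream or Blob instead), so it is collected manually here. Bucket and key are passed in by the caller.

import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";
import { Readable } from "stream";

const client = new S3Client({ region: "us-east-1" });

// Collect a Node.js Readable into a single Buffer.
const collect = (stream: Readable): Promise<Buffer> =>
  new Promise((resolve, reject) => {
    const chunks: Buffer[] = [];
    stream.on("data", (chunk) => chunks.push(Buffer.from(chunk)));
    stream.on("error", reject);
    stream.on("end", () => resolve(Buffer.concat(chunks)));
  });

async function readObjectAsText(bucket: string, key: string): Promise<string> {
  const { Body } = await client.send(new GetObjectCommand({ Bucket: bucket, Key: key }));
  const buffer = await collect(Body as Readable); // Node.js runtime assumption
  return buffer.toString("utf-8");
}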
\n * \n */\nvar GetObjectCommand = /** @class */ (function (_super) {\n __extends(GetObjectCommand, _super);\n // Start section: command_properties\n // End section: command_properties\n function GetObjectCommand(input) {\n var _this = \n // Start section: command_constructor\n _super.call(this) || this;\n _this.input = input;\n return _this;\n // End section: command_constructor\n }\n /**\n * @internal\n */\n GetObjectCommand.prototype.resolveMiddleware = function (clientStack, configuration, options) {\n this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));\n this.middlewareStack.use(getSsecPlugin(configuration));\n this.middlewareStack.use(getBucketEndpointPlugin(configuration));\n var stack = clientStack.concat(this.middlewareStack);\n var logger = configuration.logger;\n var clientName = \"S3Client\";\n var commandName = \"GetObjectCommand\";\n var handlerExecutionContext = {\n logger: logger,\n clientName: clientName,\n commandName: commandName,\n inputFilterSensitiveLog: GetObjectRequest.filterSensitiveLog,\n outputFilterSensitiveLog: GetObjectOutput.filterSensitiveLog,\n };\n var requestHandler = configuration.requestHandler;\n return stack.resolve(function (request) {\n return requestHandler.handle(request.request, options || {});\n }, handlerExecutionContext);\n };\n GetObjectCommand.prototype.serialize = function (input, context) {\n return serializeAws_restXmlGetObjectCommand(input, context);\n };\n GetObjectCommand.prototype.deserialize = function (output, context) {\n return deserializeAws_restXmlGetObjectCommand(output, context);\n };\n return GetObjectCommand;\n}($Command));\nexport { GetObjectCommand };\n//# sourceMappingURL=GetObjectCommand.js.map","import { __extends } from \"tslib\";\nimport { DeleteObjectOutput, DeleteObjectRequest } from \"../models/models_0\";\nimport { deserializeAws_restXmlDeleteObjectCommand, serializeAws_restXmlDeleteObjectCommand, } from \"../protocols/Aws_restXml\";\nimport { getBucketEndpointPlugin } from \"@aws-sdk/middleware-bucket-endpoint\";\nimport { getSerdePlugin } from \"@aws-sdk/middleware-serde\";\nimport { Command as $Command } from \"@aws-sdk/smithy-client\";\n/**\n * Removes the null version (if there is one) of an object and inserts a delete marker,\n * which becomes the latest version of the object. If there isn't a null version, Amazon S3 does\n * not remove any objects.
\n *\n * To remove a specific version, you must be the bucket owner and you must use the version\n * Id subresource. Using this subresource permanently deletes the version. If the object\n * deleted is a delete marker, Amazon S3 sets the response header,\n * x-amz-delete-marker, to true.\n *\n * If the object you want to delete is in a bucket where the bucket versioning\n * configuration is MFA Delete enabled, you must include the x-amz-mfa request\n * header in the DELETE versionId request. Requests that include\n * x-amz-mfa must use HTTPS.\n *\n * For more information about MFA Delete, see Using MFA Delete. To see sample requests that use versioning, see Sample Request.
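A sketch of a versioned delete: VersionId and MFA are the DeleteObjectRequest members that map onto the versionId subresource and the x-amz-mfa header, and the device serial number and token below are placeholders. The request must be sent over HTTPS, which the default client endpoint uses.

import { S3Client, DeleteObjectCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

async function deleteSpecificVersion() {
  const response = await client.send(
    new DeleteObjectCommand({
      Bucket: "examplebucket",
      Key: "photos/2006/February/sample.jpg",
      VersionId: "3HL4kqCxf3vjVBH40Nrjfkd",            // placeholder version ID
      MFA: "arn:aws:iam::123456789012:mfa/user 123456", // placeholder "serial-number token" pair
    })
  );
  // DeleteMarker is true when the removed version was itself a delete marker.
  console.log(response.DeleteMarker, response.VersionId);
}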
\n *\n * You can delete objects by explicitly calling the DELETE Object API or configure its\n * lifecycle (PutBucketLifecycle) to\n * enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or\n * deleting objects from your bucket, you must deny them the s3:DeleteObject,\n * s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration\n * actions.\n *\n * The following operation is related to DeleteObject:
\n * \n */\nvar DeleteObjectCommand = /** @class */ (function (_super) {\n __extends(DeleteObjectCommand, _super);\n // Start section: command_properties\n // End section: command_properties\n function DeleteObjectCommand(input) {\n var _this = \n // Start section: command_constructor\n _super.call(this) || this;\n _this.input = input;\n return _this;\n // End section: command_constructor\n }\n /**\n * @internal\n */\n DeleteObjectCommand.prototype.resolveMiddleware = function (clientStack, configuration, options) {\n this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));\n this.middlewareStack.use(getBucketEndpointPlugin(configuration));\n var stack = clientStack.concat(this.middlewareStack);\n var logger = configuration.logger;\n var clientName = \"S3Client\";\n var commandName = \"DeleteObjectCommand\";\n var handlerExecutionContext = {\n logger: logger,\n clientName: clientName,\n commandName: commandName,\n inputFilterSensitiveLog: DeleteObjectRequest.filterSensitiveLog,\n outputFilterSensitiveLog: DeleteObjectOutput.filterSensitiveLog,\n };\n var requestHandler = configuration.requestHandler;\n return stack.resolve(function (request) {\n return requestHandler.handle(request.request, options || {});\n }, handlerExecutionContext);\n };\n DeleteObjectCommand.prototype.serialize = function (input, context) {\n return serializeAws_restXmlDeleteObjectCommand(input, context);\n };\n DeleteObjectCommand.prototype.deserialize = function (output, context) {\n return deserializeAws_restXmlDeleteObjectCommand(output, context);\n };\n return DeleteObjectCommand;\n}($Command));\nexport { DeleteObjectCommand };\n//# sourceMappingURL=DeleteObjectCommand.js.map","import { __extends } from \"tslib\";\nimport { ListObjectsOutput, ListObjectsRequest } from \"../models/models_0\";\nimport { deserializeAws_restXmlListObjectsCommand, serializeAws_restXmlListObjectsCommand, } from \"../protocols/Aws_restXml\";\nimport { getBucketEndpointPlugin } from \"@aws-sdk/middleware-bucket-endpoint\";\nimport { getSerdePlugin } from \"@aws-sdk/middleware-serde\";\nimport { Command as $Command } from \"@aws-sdk/smithy-client\";\n/**\n * Returns some or all (up to 1,000) of the objects in a bucket. You can use the request\n * parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK\n * response can contain valid or invalid XML. Be sure to design your application to parse the\n * contents of the response and handle it appropriately.
\n *\n * This API has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility,\n * Amazon S3 continues to support ListObjects.\n *\n * The following operations are related to ListObjects:
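A sketch of paging through a bucket with the older Marker protocol that ListObjects uses (bucket and prefix are caller-supplied placeholders); as the note above says, ListObjectsV2 with its continuation token is the recommended replacement for new code.

import { S3Client, ListObjectsCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

async function listAllKeys(bucket: string, prefix?: string): Promise<string[]> {
  const keys: string[] = [];
  let marker: string | undefined;
  do {
    const page = await client.send(
      new ListObjectsCommand({ Bucket: bucket, Prefix: prefix, Marker: marker, MaxKeys: 1000 })
    );
    for (const object of page.Contents ?? []) {
      if (object.Key) keys.push(object.Key);
    }
    // Without a Delimiter, NextMarker may be absent; fall back to the last key returned.
    marker = page.IsTruncated ? page.NextMarker ?? keys[keys.length - 1] : undefined;
  } while (marker);
  return keys;
}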
\n * \n */\nvar ListObjectsCommand = /** @class */ (function (_super) {\n __extends(ListObjectsCommand, _super);\n // Start section: command_properties\n // End section: command_properties\n function ListObjectsCommand(input) {\n var _this = \n // Start section: command_constructor\n _super.call(this) || this;\n _this.input = input;\n return _this;\n // End section: command_constructor\n }\n /**\n * @internal\n */\n ListObjectsCommand.prototype.resolveMiddleware = function (clientStack, configuration, options) {\n this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));\n this.middlewareStack.use(getBucketEndpointPlugin(configuration));\n var stack = clientStack.concat(this.middlewareStack);\n var logger = configuration.logger;\n var clientName = \"S3Client\";\n var commandName = \"ListObjectsCommand\";\n var handlerExecutionContext = {\n logger: logger,\n clientName: clientName,\n commandName: commandName,\n inputFilterSensitiveLog: ListObjectsRequest.filterSensitiveLog,\n outputFilterSensitiveLog: ListObjectsOutput.filterSensitiveLog,\n };\n var requestHandler = configuration.requestHandler;\n return stack.resolve(function (request) {\n return requestHandler.handle(request.request, options || {});\n }, handlerExecutionContext);\n };\n ListObjectsCommand.prototype.serialize = function (input, context) {\n return serializeAws_restXmlListObjectsCommand(input, context);\n };\n ListObjectsCommand.prototype.deserialize = function (output, context) {\n return deserializeAws_restXmlListObjectsCommand(output, context);\n };\n return ListObjectsCommand;\n}($Command));\nexport { ListObjectsCommand };\n//# sourceMappingURL=ListObjectsCommand.js.map","import { Int64 as IInt64 } from \"@aws-sdk/types\";\nimport { toHex } from \"@aws-sdk/util-hex-encoding\";\n\nexport interface Int64 extends IInt64 {}\n\n/**\n * A lossless representation of a signed, 64-bit integer. Instances of this\n * class may be used in arithmetic expressions as if they were numeric\n * primitives, but the binary representation will be preserved unchanged as the\n * `bytes` property of the object. The bytes should be encoded as big-endian,\n * two's complement integers.\n */\nexport class Int64 {\n constructor(readonly bytes: Uint8Array) {\n if (bytes.byteLength !== 8) {\n throw new Error(\"Int64 buffers must be exactly 8 bytes\");\n }\n }\n\n static fromNumber(number: number): Int64 {\n if (number > 9223372036854775807 || number < -9223372036854775808) {\n throw new Error(`${number} is too large (or, if negative, too small) to represent as an Int64`);\n }\n\n const bytes = new Uint8Array(8);\n for (let i = 7, remaining = Math.abs(Math.round(number)); i > -1 && remaining > 0; i--, remaining /= 256) {\n bytes[i] = remaining;\n }\n\n if (number < 0) {\n negate(bytes);\n }\n\n return new Int64(bytes);\n }\n\n /**\n * Called implicitly by infix arithmetic operators.\n */\n valueOf(): number {\n const bytes = this.bytes.slice(0);\n const negative = bytes[0] & 0b10000000;\n if (negative) {\n negate(bytes);\n }\n\n return parseInt(toHex(bytes), 16) * (negative ? 
-1 : 1);\n }\n\n toString() {\n return String(this.valueOf());\n }\n}\n\nfunction negate(bytes: Uint8Array): void {\n for (let i = 0; i < 8; i++) {\n bytes[i] ^= 0xff;\n }\n\n for (let i = 7; i > -1; i--) {\n bytes[i]++;\n if (bytes[i] !== 0) break;\n }\n}\n","import { Decoder, Encoder, MessageHeaders, MessageHeaderValue } from \"@aws-sdk/types\";\nimport { fromHex, toHex } from \"@aws-sdk/util-hex-encoding\";\n\nimport { Int64 } from \"./Int64\";\n\n/**\n * @internal\n */\nexport class HeaderMarshaller {\n constructor(private readonly toUtf8: Encoder, private readonly fromUtf8: Decoder) {}\n\n format(headers: MessageHeaders): Uint8Array {\n const chunks: Array = [];\n\n for (const headerName of Object.keys(headers)) {\n const bytes = this.fromUtf8(headerName);\n chunks.push(Uint8Array.from([bytes.byteLength]), bytes, this.formatHeaderValue(headers[headerName]));\n }\n\n const out = new Uint8Array(chunks.reduce((carry, bytes) => carry + bytes.byteLength, 0));\n let position = 0;\n for (const chunk of chunks) {\n out.set(chunk, position);\n position += chunk.byteLength;\n }\n\n return out;\n }\n\n private formatHeaderValue(header: MessageHeaderValue): Uint8Array {\n switch (header.type) {\n case \"boolean\":\n return Uint8Array.from([header.value ? HEADER_VALUE_TYPE.boolTrue : HEADER_VALUE_TYPE.boolFalse]);\n case \"byte\":\n return Uint8Array.from([HEADER_VALUE_TYPE.byte, header.value]);\n case \"short\":\n const shortView = new DataView(new ArrayBuffer(3));\n shortView.setUint8(0, HEADER_VALUE_TYPE.short);\n shortView.setInt16(1, header.value, false);\n return new Uint8Array(shortView.buffer);\n case \"integer\":\n const intView = new DataView(new ArrayBuffer(5));\n intView.setUint8(0, HEADER_VALUE_TYPE.integer);\n intView.setInt32(1, header.value, false);\n return new Uint8Array(intView.buffer);\n case \"long\":\n const longBytes = new Uint8Array(9);\n longBytes[0] = HEADER_VALUE_TYPE.long;\n longBytes.set(header.value.bytes, 1);\n return longBytes;\n case \"binary\":\n const binView = new DataView(new ArrayBuffer(3 + header.value.byteLength));\n binView.setUint8(0, HEADER_VALUE_TYPE.byteArray);\n binView.setUint16(1, header.value.byteLength, false);\n const binBytes = new Uint8Array(binView.buffer);\n binBytes.set(header.value, 3);\n return binBytes;\n case \"string\":\n const utf8Bytes = this.fromUtf8(header.value);\n const strView = new DataView(new ArrayBuffer(3 + utf8Bytes.byteLength));\n strView.setUint8(0, HEADER_VALUE_TYPE.string);\n strView.setUint16(1, utf8Bytes.byteLength, false);\n const strBytes = new Uint8Array(strView.buffer);\n strBytes.set(utf8Bytes, 3);\n return strBytes;\n case \"timestamp\":\n const tsBytes = new Uint8Array(9);\n tsBytes[0] = HEADER_VALUE_TYPE.timestamp;\n tsBytes.set(Int64.fromNumber(header.value.valueOf()).bytes, 1);\n return tsBytes;\n case \"uuid\":\n if (!UUID_PATTERN.test(header.value)) {\n throw new Error(`Invalid UUID received: ${header.value}`);\n }\n\n const uuidBytes = new Uint8Array(17);\n uuidBytes[0] = HEADER_VALUE_TYPE.uuid;\n uuidBytes.set(fromHex(header.value.replace(/\\-/g, \"\")), 1);\n return uuidBytes;\n }\n }\n\n parse(headers: DataView): MessageHeaders {\n const out: MessageHeaders = {};\n let position = 0;\n\n while (position < headers.byteLength) {\n const nameLength = headers.getUint8(position++);\n const name = this.toUtf8(new Uint8Array(headers.buffer, headers.byteOffset + position, nameLength));\n position += nameLength;\n\n switch (headers.getUint8(position++)) {\n case HEADER_VALUE_TYPE.boolTrue:\n out[name] = {\n type: 
BOOLEAN_TAG,\n value: true,\n };\n break;\n case HEADER_VALUE_TYPE.boolFalse:\n out[name] = {\n type: BOOLEAN_TAG,\n value: false,\n };\n break;\n case HEADER_VALUE_TYPE.byte:\n out[name] = {\n type: BYTE_TAG,\n value: headers.getInt8(position++),\n };\n break;\n case HEADER_VALUE_TYPE.short:\n out[name] = {\n type: SHORT_TAG,\n value: headers.getInt16(position, false),\n };\n position += 2;\n break;\n case HEADER_VALUE_TYPE.integer:\n out[name] = {\n type: INT_TAG,\n value: headers.getInt32(position, false),\n };\n position += 4;\n break;\n case HEADER_VALUE_TYPE.long:\n out[name] = {\n type: LONG_TAG,\n value: new Int64(new Uint8Array(headers.buffer, headers.byteOffset + position, 8)),\n };\n position += 8;\n break;\n case HEADER_VALUE_TYPE.byteArray:\n const binaryLength = headers.getUint16(position, false);\n position += 2;\n out[name] = {\n type: BINARY_TAG,\n value: new Uint8Array(headers.buffer, headers.byteOffset + position, binaryLength),\n };\n position += binaryLength;\n break;\n case HEADER_VALUE_TYPE.string:\n const stringLength = headers.getUint16(position, false);\n position += 2;\n out[name] = {\n type: STRING_TAG,\n value: this.toUtf8(new Uint8Array(headers.buffer, headers.byteOffset + position, stringLength)),\n };\n position += stringLength;\n break;\n case HEADER_VALUE_TYPE.timestamp:\n out[name] = {\n type: TIMESTAMP_TAG,\n value: new Date(new Int64(new Uint8Array(headers.buffer, headers.byteOffset + position, 8)).valueOf()),\n };\n position += 8;\n break;\n case HEADER_VALUE_TYPE.uuid:\n const uuidBytes = new Uint8Array(headers.buffer, headers.byteOffset + position, 16);\n position += 16;\n out[name] = {\n type: UUID_TAG,\n value: `${toHex(uuidBytes.subarray(0, 4))}-${toHex(uuidBytes.subarray(4, 6))}-${toHex(\n uuidBytes.subarray(6, 8)\n )}-${toHex(uuidBytes.subarray(8, 10))}-${toHex(uuidBytes.subarray(10))}`,\n };\n break;\n default:\n throw new Error(`Unrecognized header type tag`);\n }\n }\n\n return out;\n }\n}\n\nconst enum HEADER_VALUE_TYPE {\n boolTrue = 0,\n boolFalse,\n byte,\n short,\n integer,\n long,\n byteArray,\n string,\n timestamp,\n uuid,\n}\n\nconst BOOLEAN_TAG = \"boolean\";\nconst BYTE_TAG = \"byte\";\nconst SHORT_TAG = \"short\";\nconst INT_TAG = \"integer\";\nconst LONG_TAG = \"long\";\nconst BINARY_TAG = \"binary\";\nconst STRING_TAG = \"string\";\nconst TIMESTAMP_TAG = \"timestamp\";\nconst UUID_TAG = \"uuid\";\n\nconst UUID_PATTERN = /^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$/;\n","import { EventStreamMarshaller as EventMarshaller } from \"@aws-sdk/eventstream-marshaller\";\nimport { EventStreamMarshaller as UniversalEventStreamMarshaller } from \"@aws-sdk/eventstream-serde-universal\";\nimport { Decoder, Encoder, EventStreamMarshaller as IEventStreamMarshaller, Message } from \"@aws-sdk/types\";\n\nimport { iterableToReadableStream, readableStreamtoIterable } from \"./utils\";\n\nexport interface EventStreamMarshaller extends IEventStreamMarshaller {}\n\nexport interface EventStreamMarshallerOptions {\n utf8Encoder: Encoder;\n utf8Decoder: Decoder;\n}\n\n/**\n * Utility class used to serialize and deserialize event streams in\n * browsers and ReactNative.\n *\n * In browsers where ReadableStream API is available:\n * * deserialize from ReadableStream to an async iterable of output structure\n * * serialize from async iterable of input structure to ReadableStream\n * In ReactNative where only async iterable API is available:\n * * deserialize from async iterable of binaries to async iterable of output structure\n * * 
serialize from async iterable of input structure to async iterable of binaries\n *\n * We use ReadableStream API in browsers because of the consistency with other\n * streaming operations, where ReadableStream API is used to denote streaming data.\n * Whereas in ReactNative, ReadableStream API is not available, we use async iterable\n * for streaming data although it has lower throughput.\n */\nexport class EventStreamMarshaller {\n private readonly eventMarshaller: EventMarshaller;\n private readonly universalMarshaller: UniversalEventStreamMarshaller;\n constructor({ utf8Encoder, utf8Decoder }: EventStreamMarshallerOptions) {\n this.eventMarshaller = new EventMarshaller(utf8Encoder, utf8Decoder);\n this.universalMarshaller = new UniversalEventStreamMarshaller({\n utf8Decoder,\n utf8Encoder,\n });\n }\n\n deserialize(\n body: ReadableStream | AsyncIterable,\n deserializer: (input: { [event: string]: Message }) => Promise\n ): AsyncIterable {\n const bodyIterable = isReadableStream(body) ? readableStreamtoIterable(body) : body;\n return this.universalMarshaller.deserialize(bodyIterable, deserializer);\n }\n\n /**\n * Generate a stream that serialize events into stream of binary chunks;\n *\n * Caveat is that streaming request payload doesn't work on browser with native\n * xhr or fetch handler currently because they don't support upload streaming.\n * reference:\n * * https://bugs.chromium.org/p/chromium/issues/detail?id=688906\n * * https://bugzilla.mozilla.org/show_bug.cgi?id=1387483\n *\n */\n serialize(input: AsyncIterable, serializer: (event: T) => Message): ReadableStream | AsyncIterable {\n const serialziedIterable = this.universalMarshaller.serialize(input, serializer);\n return typeof ReadableStream === \"function\" ? iterableToReadableStream(serialziedIterable) : serialziedIterable;\n }\n}\n\nconst isReadableStream = (body: any): body is ReadableStream =>\n typeof ReadableStream === \"function\" && body instanceof ReadableStream;\n","import { Crc32 } from \"@aws-crypto/crc32\";\n\n// All prelude components are unsigned, 32-bit integers\nconst PRELUDE_MEMBER_LENGTH = 4;\n// The prelude consists of two components\nconst PRELUDE_LENGTH = PRELUDE_MEMBER_LENGTH * 2;\n// Checksums are always CRC32 hashes.\nconst CHECKSUM_LENGTH = 4;\n// Messages must include a full prelude, a prelude checksum, and a message checksum\nconst MINIMUM_MESSAGE_LENGTH = PRELUDE_LENGTH + CHECKSUM_LENGTH * 2;\n\n/**\n * @internal\n */\nexport interface MessageParts {\n headers: DataView;\n body: Uint8Array;\n}\n\n/**\n * @internal\n */\nexport function splitMessage({ byteLength, byteOffset, buffer }: ArrayBufferView): MessageParts {\n if (byteLength < MINIMUM_MESSAGE_LENGTH) {\n throw new Error(\"Provided message too short to accommodate event stream message overhead\");\n }\n\n const view = new DataView(buffer, byteOffset, byteLength);\n\n const messageLength = view.getUint32(0, false);\n\n if (byteLength !== messageLength) {\n throw new Error(\"Reported message length does not match received message length\");\n }\n\n const headerLength = view.getUint32(PRELUDE_MEMBER_LENGTH, false);\n const expectedPreludeChecksum = view.getUint32(PRELUDE_LENGTH, false);\n const expectedMessageChecksum = view.getUint32(byteLength - CHECKSUM_LENGTH, false);\n\n const checksummer = new Crc32().update(new Uint8Array(buffer, byteOffset, PRELUDE_LENGTH));\n if (expectedPreludeChecksum !== checksummer.digest()) {\n throw new Error(\n `The prelude checksum specified in the message (${expectedPreludeChecksum}) does not match the 
calculated CRC32 checksum (${checksummer.digest()})`\n );\n }\n\n checksummer.update(\n new Uint8Array(buffer, byteOffset + PRELUDE_LENGTH, byteLength - (PRELUDE_LENGTH + CHECKSUM_LENGTH))\n );\n if (expectedMessageChecksum !== checksummer.digest()) {\n throw new Error(\n `The message checksum (${checksummer.digest()}) did not match the expected value of ${expectedMessageChecksum}`\n );\n }\n\n return {\n headers: new DataView(buffer, byteOffset + PRELUDE_LENGTH + CHECKSUM_LENGTH, headerLength),\n body: new Uint8Array(\n buffer,\n byteOffset + PRELUDE_LENGTH + CHECKSUM_LENGTH + headerLength,\n messageLength - headerLength - (PRELUDE_LENGTH + CHECKSUM_LENGTH + CHECKSUM_LENGTH)\n ),\n };\n}\n","export function getChunkedStream(source: AsyncIterable): AsyncIterable {\n let currentMessageTotalLength = 0;\n let currentMessagePendingLength = 0;\n let currentMessage: Uint8Array | null = null;\n let messageLengthBuffer: Uint8Array | null = null;\n const allocateMessage = (size: number) => {\n if (typeof size !== \"number\") {\n throw new Error(\"Attempted to allocate an event message where size was not a number: \" + size);\n }\n currentMessageTotalLength = size;\n currentMessagePendingLength = 4;\n currentMessage = new Uint8Array(size);\n const currentMessageView = new DataView(currentMessage.buffer);\n currentMessageView.setUint32(0, size, false); //set big-endian Uint32 to 0~3 bytes\n };\n\n const iterator = async function* () {\n const sourceIterator = source[Symbol.asyncIterator]();\n while (true) {\n const { value, done } = await sourceIterator.next();\n if (done) {\n if (!currentMessageTotalLength) {\n return;\n } else if (currentMessageTotalLength === currentMessagePendingLength) {\n yield currentMessage as Uint8Array;\n } else {\n throw new Error(\"Truncated event message received.\");\n }\n return;\n }\n\n const chunkLength = value.length;\n let currentOffset = 0;\n\n while (currentOffset < chunkLength) {\n // create new message if necessary\n if (!currentMessage) {\n // working on a new message, determine total length\n const bytesRemaining = chunkLength - currentOffset;\n // prevent edge case where total length spans 2 chunks\n if (!messageLengthBuffer) {\n messageLengthBuffer = new Uint8Array(4);\n }\n const numBytesForTotal = Math.min(\n 4 - currentMessagePendingLength, // remaining bytes to fill the messageLengthBuffer\n bytesRemaining // bytes left in chunk\n );\n\n messageLengthBuffer.set(\n // @ts-ignore error TS2532: Object is possibly 'undefined' for value\n value.slice(currentOffset, currentOffset + numBytesForTotal),\n currentMessagePendingLength\n );\n\n currentMessagePendingLength += numBytesForTotal;\n currentOffset += numBytesForTotal;\n\n if (currentMessagePendingLength < 4) {\n // not enough information to create the current message\n break;\n }\n allocateMessage(new DataView(messageLengthBuffer.buffer).getUint32(0, false));\n messageLengthBuffer = null;\n }\n\n // write data into current message\n const numBytesToWrite = Math.min(\n currentMessageTotalLength - currentMessagePendingLength, // number of bytes left to complete message\n chunkLength - currentOffset // number of bytes left in the original chunk\n );\n currentMessage!.set(\n // @ts-ignore error TS2532: Object is possibly 'undefined' for value\n value.slice(currentOffset, currentOffset + numBytesToWrite),\n currentMessagePendingLength\n );\n currentMessagePendingLength += numBytesToWrite;\n currentOffset += numBytesToWrite;\n\n // check if a message is ready to be pushed\n if (currentMessageTotalLength && 
currentMessageTotalLength === currentMessagePendingLength) {\n // push out the message\n yield currentMessage as Uint8Array;\n // cleanup\n currentMessage = null;\n currentMessageTotalLength = 0;\n currentMessagePendingLength = 0;\n }\n }\n }\n };\n\n return {\n [Symbol.asyncIterator]: iterator,\n };\n}\n","import { EventStreamMarshaller as EventMarshaller } from \"@aws-sdk/eventstream-marshaller\";\nimport { Encoder, Message } from \"@aws-sdk/types\";\n\nexport type UnmarshalledStreamOptions = {\n eventMarshaller: EventMarshaller;\n deserializer: (input: { [name: string]: Message }) => Promise;\n toUtf8: Encoder;\n};\n\nexport function getUnmarshalledStream(\n source: AsyncIterable,\n options: UnmarshalledStreamOptions\n): AsyncIterable {\n return {\n [Symbol.asyncIterator]: async function* () {\n for await (const chunk of source) {\n const message = options.eventMarshaller.unmarshall(chunk);\n const { value: messageType } = message.headers[\":message-type\"];\n if (messageType === \"error\") {\n // Unmodeled exception in event\n const unmodeledError = new Error((message.headers[\":error-message\"].value as string) || \"UnknownError\");\n unmodeledError.name = message.headers[\":error-code\"].value as string;\n throw unmodeledError;\n } else if (messageType === \"exception\") {\n // For modeled exception, push it to deserializer and throw after deserializing\n const code = message.headers[\":exception-type\"].value as string;\n const exception = { [code]: message };\n // Get parsed exception event in key(error code) value(structured error) pair.\n const deserializedException = await options.deserializer(exception);\n if (deserializedException.$unknown) {\n //this is an unmodeled exception then try parsing it with best effort\n const error = new Error(options.toUtf8(message.body));\n error.name = code;\n throw error;\n }\n throw deserializedException[code];\n } else if (messageType === \"event\") {\n const event = {\n [message.headers[\":event-type\"].value as string]: message,\n };\n const deserialized = await options.deserializer(event);\n if (deserialized.$unknown) continue;\n yield deserialized;\n } else {\n throw Error(`Unrecognizable event type: ${message.headers[\":event-type\"].value}`);\n }\n }\n },\n };\n}\n","/**\n * A util function converting ReadableStream into an async iterable.\n * Reference: https://jakearchibald.com/2017/async-iterators-and-generators/#making-streams-iterate\n */\nexport const readableStreamtoIterable = (readableStream: ReadableStream): AsyncIterable => ({\n [Symbol.asyncIterator]: async function* () {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { done, value } = await reader.read();\n if (done) return;\n yield value as T;\n }\n } finally {\n reader.releaseLock();\n }\n },\n});\n\n/**\n * A util function converting async iterable to a ReadableStream.\n */\nexport const iterableToReadableStream = (asyncIterable: AsyncIterable): ReadableStream => {\n const iterator = asyncIterable[Symbol.asyncIterator]();\n return new ReadableStream({\n async pull(controller) {\n const { done, value } = await iterator.next();\n if (done) {\n return controller.close();\n }\n controller.enqueue(value);\n },\n });\n};\n","import { __assign } from \"tslib\";\n// Partition default templates\nvar AWS_TEMPLATE = \"cognito-identity.{region}.amazonaws.com\";\nvar AWS_CN_TEMPLATE = \"cognito-identity.{region}.amazonaws.com.cn\";\nvar AWS_ISO_TEMPLATE = \"cognito-identity.{region}.c2s.ic.gov\";\nvar AWS_ISO_B_TEMPLATE = 
\"cognito-identity.{region}.sc2s.sgov.gov\";\nvar AWS_US_GOV_TEMPLATE = \"cognito-identity.{region}.amazonaws.com\";\n// Partition regions\nvar AWS_REGIONS = new Set([\n \"af-south-1\",\n \"ap-east-1\",\n \"ap-northeast-1\",\n \"ap-northeast-2\",\n \"ap-south-1\",\n \"ap-southeast-1\",\n \"ap-southeast-2\",\n \"ca-central-1\",\n \"eu-central-1\",\n \"eu-north-1\",\n \"eu-south-1\",\n \"eu-west-1\",\n \"eu-west-2\",\n \"eu-west-3\",\n \"me-south-1\",\n \"sa-east-1\",\n \"us-east-1\",\n \"us-east-2\",\n \"us-west-1\",\n \"us-west-2\",\n]);\nvar AWS_CN_REGIONS = new Set([\"cn-north-1\", \"cn-northwest-1\"]);\nvar AWS_ISO_REGIONS = new Set([\"us-iso-east-1\"]);\nvar AWS_ISO_B_REGIONS = new Set([\"us-isob-east-1\"]);\nvar AWS_US_GOV_REGIONS = new Set([\"us-gov-east-1\", \"us-gov-west-1\"]);\nexport var defaultRegionInfoProvider = function (region, options) {\n var regionInfo = undefined;\n switch (region) {\n // First, try to match exact region names.\n case \"ap-northeast-1\":\n regionInfo = {\n hostname: \"cognito-identity.ap-northeast-1.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"ap-northeast-2\":\n regionInfo = {\n hostname: \"cognito-identity.ap-northeast-2.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"ap-south-1\":\n regionInfo = {\n hostname: \"cognito-identity.ap-south-1.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"ap-southeast-1\":\n regionInfo = {\n hostname: \"cognito-identity.ap-southeast-1.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"ap-southeast-2\":\n regionInfo = {\n hostname: \"cognito-identity.ap-southeast-2.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"ca-central-1\":\n regionInfo = {\n hostname: \"cognito-identity.ca-central-1.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"cn-north-1\":\n regionInfo = {\n hostname: \"cognito-identity.cn-north-1.amazonaws.com.cn\",\n partition: \"aws-cn\",\n };\n break;\n case \"eu-central-1\":\n regionInfo = {\n hostname: \"cognito-identity.eu-central-1.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"eu-north-1\":\n regionInfo = {\n hostname: \"cognito-identity.eu-north-1.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"eu-west-1\":\n regionInfo = {\n hostname: \"cognito-identity.eu-west-1.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"eu-west-2\":\n regionInfo = {\n hostname: \"cognito-identity.eu-west-2.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"eu-west-3\":\n regionInfo = {\n hostname: \"cognito-identity.eu-west-3.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"fips-us-east-1\":\n regionInfo = {\n hostname: \"cognito-identity-fips.us-east-1.amazonaws.com\",\n partition: \"aws\",\n signingRegion: \"us-east-1\",\n };\n break;\n case \"fips-us-east-2\":\n regionInfo = {\n hostname: \"cognito-identity-fips.us-east-2.amazonaws.com\",\n partition: \"aws\",\n signingRegion: \"us-east-2\",\n };\n break;\n case \"fips-us-gov-west-1\":\n regionInfo = {\n hostname: \"cognito-identity-fips.us-gov-west-1.amazonaws.com\",\n partition: \"aws-us-gov\",\n signingRegion: \"us-gov-west-1\",\n };\n break;\n case \"fips-us-west-2\":\n regionInfo = {\n hostname: \"cognito-identity-fips.us-west-2.amazonaws.com\",\n partition: \"aws\",\n signingRegion: \"us-west-2\",\n };\n break;\n case \"sa-east-1\":\n regionInfo = {\n hostname: \"cognito-identity.sa-east-1.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"us-east-1\":\n regionInfo = {\n hostname: 
\"cognito-identity.us-east-1.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"us-east-2\":\n regionInfo = {\n hostname: \"cognito-identity.us-east-2.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"us-gov-west-1\":\n regionInfo = {\n hostname: \"cognito-identity.us-gov-west-1.amazonaws.com\",\n partition: \"aws-us-gov\",\n };\n break;\n case \"us-west-1\":\n regionInfo = {\n hostname: \"cognito-identity.us-west-1.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n case \"us-west-2\":\n regionInfo = {\n hostname: \"cognito-identity.us-west-2.amazonaws.com\",\n partition: \"aws\",\n };\n break;\n // Next, try to match partition endpoints.\n default:\n if (AWS_REGIONS.has(region)) {\n regionInfo = {\n hostname: AWS_TEMPLATE.replace(\"{region}\", region),\n partition: \"aws\",\n };\n }\n if (AWS_CN_REGIONS.has(region)) {\n regionInfo = {\n hostname: AWS_CN_TEMPLATE.replace(\"{region}\", region),\n partition: \"aws-cn\",\n };\n }\n if (AWS_ISO_REGIONS.has(region)) {\n regionInfo = {\n hostname: AWS_ISO_TEMPLATE.replace(\"{region}\", region),\n partition: \"aws-iso\",\n };\n }\n if (AWS_ISO_B_REGIONS.has(region)) {\n regionInfo = {\n hostname: AWS_ISO_B_TEMPLATE.replace(\"{region}\", region),\n partition: \"aws-iso-b\",\n };\n }\n if (AWS_US_GOV_REGIONS.has(region)) {\n regionInfo = {\n hostname: AWS_US_GOV_TEMPLATE.replace(\"{region}\", region),\n partition: \"aws-us-gov\",\n };\n }\n // Finally, assume it's an AWS partition endpoint.\n if (regionInfo === undefined) {\n regionInfo = {\n hostname: AWS_TEMPLATE.replace(\"{region}\", region),\n partition: \"aws\",\n };\n }\n }\n return Promise.resolve(__assign({ signingService: \"cognito-identity\" }, regionInfo));\n};\n//# sourceMappingURL=endpoints.js.map","import { defaultRegionInfoProvider } from \"./endpoints\";\nimport { parseUrl } from \"@aws-sdk/url-parser\";\n/**\n * @internal\n */\nexport var ClientSharedValues = {\n apiVersion: \"2014-06-30\",\n disableHostPrefix: false,\n logger: {},\n regionInfoProvider: defaultRegionInfoProvider,\n serviceId: \"Cognito Identity\",\n urlParser: parseUrl,\n};\n//# sourceMappingURL=runtimeConfig.shared.js.map","import { __assign } from \"tslib\";\nimport packageInfo from \"./package.json\";\nimport { Sha256 } from \"@aws-crypto/sha256-browser\";\nimport { FetchHttpHandler, streamCollector } from \"@aws-sdk/fetch-http-handler\";\nimport { invalidProvider } from \"@aws-sdk/invalid-dependency\";\nimport { DEFAULT_MAX_ATTEMPTS } from \"@aws-sdk/middleware-retry\";\nimport { fromBase64, toBase64 } from \"@aws-sdk/util-base64-browser\";\nimport { calculateBodyLength } from \"@aws-sdk/util-body-length-browser\";\nimport { defaultUserAgent } from \"@aws-sdk/util-user-agent-browser\";\nimport { fromUtf8, toUtf8 } from \"@aws-sdk/util-utf8-browser\";\nimport { ClientSharedValues } from \"./runtimeConfig.shared\";\n/**\n * @internal\n */\nexport var ClientDefaultValues = __assign(__assign({}, ClientSharedValues), { runtime: \"browser\", base64Decoder: fromBase64, base64Encoder: toBase64, bodyLengthChecker: calculateBodyLength, credentialDefaultProvider: function (_) { return function () { return Promise.reject(new Error(\"Credential is missing\")); }; }, defaultUserAgentProvider: defaultUserAgent({\n serviceId: ClientSharedValues.serviceId,\n clientVersion: packageInfo.version,\n }), maxAttempts: DEFAULT_MAX_ATTEMPTS, region: invalidProvider(\"Region is missing\"), requestHandler: new FetchHttpHandler(), sha256: Sha256, streamCollector: streamCollector, utf8Decoder: fromUtf8, 
utf8Encoder: toUtf8 });\n//# sourceMappingURL=runtimeConfig.browser.js.map","import { Decoder, Encoder, EventSigner, EventStreamSerdeProvider, Provider } from \"@aws-sdk/types\";\n\nimport { EventStreamMarshaller } from \"./EventStreamMarshaller\";\n\n/** browser event stream serde utils provider */\nexport const eventStreamSerdeProvider: EventStreamSerdeProvider = (options: {\n utf8Encoder: Encoder;\n utf8Decoder: Decoder;\n eventSigner: EventSigner | Provider;\n}) => new EventStreamMarshaller(options);\n","import {\n InitializeHandler,\n InitializeHandlerArguments,\n InitializeHandlerOptions,\n InitializeHandlerOutput,\n InitializeMiddleware,\n MetadataBearer,\n Pluggable,\n} from \"@aws-sdk/types\";\nimport { validate as validateArn } from \"@aws-sdk/util-arn-parser\";\n\n/**\n * @internal\n */\nexport function validateBucketNameMiddleware(): InitializeMiddleware {\n return