import { Pack, pack } from 'tar-stream';
import { pullManifestsFromRegistry } from './registry';
import { ExtendedManifests, Image } from './interface-manifest';
import { computeDockerLayers, streamDockerLayers, Layer } from './layers';
import { AxiosBasicCredentials } from 'axios';

/** dotEtch version, sent as the first file on the stream */
const VERSION = '1.0';

/**
 * Main Processing function
 *
 * Beware that, as we're outputting to a tar stream, the order of operations matters
 * and entries must be written sequentially, never concurrently.
 *
 * The order of files in an `.etch` tar stream is:
 * 1. manifest
 * 2. base os image - can be zipped; the name of the file should match the one specified in the manifest
 * 3. /inject/_partitions_/_foldersOrFilesToInject_ - injectable assets
 */
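/**
 * Example: wiring the pieces together end to end (a minimal sketch; the output
 * path, partition number, image list and credentials are hypothetical, and the
 * exact arguments of computeDockerLayers are assumed rather than confirmed):
 *
 *   const packStream = await prepareEtchStream();
 *   packStream.pipe(createWriteStream('/tmp/preloaded.etch')); // hypothetical target
 *
 *   // VERSION is already on the stream; the base OS image must come next
 *   await streamBaseImage(packStream, baseImageStream, baseImageSize, 'balena-os.img.zip');
 *
 *   // then the injectable assets, e.g. preloaded Docker images on data partition 6
 *   const manifests = await getManifestsForImages(images, auth);
 *   const layers = await computeDockerLayers(manifests);
 *   await streamDockerAssets(packStream, 6, manifests, layers);
 *
 *   await closeEtchStream(packStream);
 */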

/**
 * PromisePacker
 * Promisifies tar-stream's pack.entry ( https://www.npmjs.com/package/tar-stream )
 */
const promisePacker =
  (pack: Pack, injectFolder?: string) => (header: any, value: any) =>
    new Promise((resolve, reject) => {
      if (header.name.includes('sha256:')) {
        console.log(`=> FIXME!! pack header.name: ${header.name}`);
      }
      // add the root injectable folder in front of the name when injecting files
      if (injectFolder) {
        header.name = `${injectFolder}/${header.name}`;
      }
      pack.entry(header, value, (error: any) => {
        if (error) {
          // bail out on failure so we don't also resolve after rejecting
          return reject(error);
        }
        resolve(true);
      });
    });
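
// Hedged usage sketch of the curried packer (the folder, file name, and
// content below are hypothetical placeholders):
// const packFile = promisePacker(packStream, 'inject/6');
// await packFile({ name: 'config.json', mode: 0o644 }, '{"hostname":"etcher"}');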

/**
 * Prepare Etch Stream
 * Creates the tar-stream pack used by all the other functions; pipe it to any
 * writable output (a file, an http response, ...).
 * @returns tar-stream pack writable stream
 */
const prepareEtchStream = async (): Promise<Pack> => {
  const packStream = pack();

  // stream the VERSION file first so readers can check compatibility
  const packer = promisePacker(packStream);
  await packer(
    {
      name: '/VERSION',
      mode: 0o777,
    },
    VERSION,
  );

  console.log('==> dotEtch Stream Ready @prepareEtchStream');
  return packStream;
};

const closeEtchStream = async (packStream: Pack) => {
  // close the tarball; tar-stream's finalize() is synchronous
  packStream.finalize();
  console.log('==> dotEtch Stream Closed @closeEtchStream');
};

const streamBaseImage = (
  packStream: Pack,
  baseImageStream: NodeJS.ReadableStream,
  baseImageSize: number,
  baseImageName: string,
): Promise<void> =>
  new Promise((resolve, reject) => {
    // Beware that knowing the file size in advance is mandatory:
    // the tar entry header must carry the exact byte count of the content
    const baseImageStreamEntry = packStream.entry({
      name: baseImageName,
      mode: 0o644,
      size: baseImageSize,
    });

    console.log('== Start streaming base image @streamBaseImage ==');

    baseImageStream.pipe(baseImageStreamEntry);

    baseImageStream.on('end', function () {
      // we're good, we can continue the process
      console.log('== End of base image streaming @streamBaseImage ==');
      resolve();
    });

    baseImageStream.on('error', function (error) {
      // something went wrong
      reject(error);
    });
  });
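
// A minimal sketch of feeding streamBaseImage from a local file; the path is
// hypothetical, and fs.statSync supplies the mandatory size up front:
// import { createReadStream, statSync } from 'fs';
// const imagePath = '/tmp/balena-os.img.zip';
// await streamBaseImage(
//   packStream,
//   createReadStream(imagePath),
//   statSync(imagePath).size,
//   'balena-os.img.zip', // must match the name declared in the manifest
// );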

/**
 * Stream preloaded Docker assets prepared at the previous stage.
 * Must be called on a prepared DotEtch stream, after the base image has been streamed
 * (cf readme) */
const streamDockerAssets = async (
  packStream: Pack,
  dataPartition: number,
  manifests: ExtendedManifests[],
  layers: Layer[],
): Promise<void> => {
  try {
    console.log('==> STARTING @streamDockerAssets');

    // root injectable folder for this data partition
    const injectPath = `inject/${dataPartition}`;

    // download and process layers
    // this is where most of the work happens:
    // the content of the layers is streamed directly to packStream.
    // Everything before this point can be parallelised with streaming the base image,
    // but this step MUST come right AFTER the base image stream is done.
    // Returns the files to inject later (all the generated metadata files).
    const layersFilesToInject = await streamDockerLayers(
      manifests,
      layers,
      packStream,
      injectPath,
    );
    // prepare image files to inject (same as for layers, but for images)
    const dockerImageOverlay2Imagedb = 'docker/image/overlay2/imagedb';
    const imagesFilesToInject = manifests.flatMap(
      ({ configManifestV2, imageId }: any) => {
        const shortImageId = imageId.split(':')[1];
        return [
          {
            header: {
              name: `${dockerImageOverlay2Imagedb}/content/sha256/${shortImageId}`,
              mode: 0o644,
            },
            content: JSON.stringify(configManifestV2),
          },
          {
            header: {
              name: `${dockerImageOverlay2Imagedb}/metadata/sha256/${shortImageId}/lastUpdated`,
              mode: 0o644,
            },
            content: new Date().toISOString(),
          },
        ];
      },
    );

    /**
     * Generate a repositories.json snippet for each image, merge everything, and prepare the file to be injected as
     * /var/lib/docker/image/overlay2/repositories.json
     * That file informs balena-engine of which images are available in its local store
     * and maps image name(s) (including tag) to an image digest.
     *
     * Here we generate a complete repositories.json for all the preloaded images, including the supervisor.
     *
     * We will overwrite the original repositories.json which was created at balenaOS build time.
     *
     * One small difference between the original and the one we create is that we don't tag the supervisor with its hash.
     * This shouldn't have any impact, but is worth noting just in case.
     *
     * Relative path of repositories.json as injected in the resin-data partition;
     * on a running device it would be /var/lib/docker/image/overlay2/repositories.json
     */
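    // Illustrative shape of the generated file (the registry names and digests
    // below are made-up placeholders):
    // {
    //   "Repositories": {
    //     "registry2.example.com/v2/aaaa": {
    //       "registry2.example.com/v2/aaaa:latest": "sha256:111...",
    //       "registry2.example.com/v2/aaaa@sha256:222...": "sha256:111..."
    //     }
    //   }
    // }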

    const repositories: Record<string, Record<string, string>> = {};
    for (const {
      imageId,
      imageName,
      imageHash,
      isSupervisor,
      supervisorVersion,
    } of manifests) {
      // prepare repositories; note that imageId already carries the `sha256:` prefix
      repositories[imageName] = {
        [`${imageName}:latest`]: imageId,
      };
      if (imageHash !== 'latest') {
        // digest-style key (`name@sha256:...`), matching docker's repositories.json format
        repositories[imageName][`${imageName}@${imageHash}`] = imageId;
      }

      /* TODO: `isSupervisor` and `supervisorVersion` are both balena specific and can be kept undefined in all other situations,
       * including when using balena's hostApps (not yet supported on balenaos).
       * Once balena moves to hostapps this exception can be removed.
       */
      if (isSupervisor) {
        repositories['balena_supervisor'] = {
          [`balena_supervisor:${supervisorVersion}`]: imageId,
        };
      }
    }

    // prepare other metadata files
    const generalFilesToInject = [
      {
        header: {
          name: 'docker/image/overlay2/repositories.json',
          mode: 0o644,
        },
        content: JSON.stringify({
          Repositories: repositories,
        }),
      },
    ];

    console.log('---> Add metadata files and folders');
    // inject all metadata files and folders, one at a time, on the stream
    await streamFiles(packStream, dataPartition, [
      ...layersFilesToInject,
      ...imagesFilesToInject,
      ...generalFilesToInject,
    ]);

    // we're done with the preload assets
    console.log('==> FINISHED @streamDockerAssets');
  } catch (error) {
    console.log("couldn't make the assets", error);
    // rethrow so callers see the actual failure instead of a reason-less rejection
    throw error;
  }
};

/**
 * Stream Preloading Arbitrary Assets
 * Must be called on a prepared DotEtch stream, after the base image has been streamed
 * (cf readme) */
const streamFiles = async (
  packStream: Pack,
  dataPartition: number | string,
  files: any[],
): Promise<void> => {
  try {
    console.log('==> STARTING @streamFiles');

    // prepare the packer; file names get prefixed with the injectable folder
    const injectPath = `inject/${dataPartition}`;
    const packFile = promisePacker(packStream, injectPath);

    for (const { header, content } of files) {
      await packFile(header, content);
    }

    console.log('==> FINISHED @streamFiles');
  } catch (error) {
    console.error(error);
    throw error;
  }
};
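
// Hedged usage sketch: inject one hypothetical file into the data partition
// (the partition number, path, and content below are made up):
// await streamFiles(packStream, 6, [
//   {
//     header: { name: 'etc/hostname', mode: 0o644 },
//     content: 'preloaded-device',
//   },
// ]);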

const getManifestsForImages = async (
  images: Image[],
  auth: AxiosBasicCredentials | string,
): Promise<ExtendedManifests[]> => {
  try {
    // get image manifests for all requested images
    const manifests: ExtendedManifests[] = [];
    console.log(`== Downloading Manifests @getManifests ==`);
    for (const [index, image] of images.entries()) {
      console.log(`=> ${index + 1} / ${images.length} : ${image.imageName}`);

      // auth holds the credentials we'll use to get a registry token;
      // the token exchange endpoint is indicated by the registry itself
      const manifestInfo = await pullManifestsFromRegistry(
        image.imageName,
        auth,
      );
      manifests.push({
        ...manifestInfo,
        ...image,
      });
    }
    console.log(`== Downloading Manifests @getManifests DONE ==`);

    return manifests;
  } catch (error) {
    console.log('Failed to get manifests', error);
    throw error;
  }
};
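
// Hedged usage sketch: both credential forms are accepted; the values below
// are hypothetical placeholders.
// const manifests = await getManifestsForImages(images, {
//   username: 'u_user',
//   password: 'api-key',
// }); // or pass a raw token string instead of basic credentials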

export {
  prepareEtchStream,
  streamBaseImage,
  streamFiles,
  closeEtchStream,
  streamDockerAssets,
  getManifestsForImages,
  computeDockerLayers,
};