001 /** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018 019 package org.apache.hadoop.yarn.applications.distributedshell; 020 021 import java.io.BufferedReader; 022 import java.io.IOException; 023 import java.io.InputStream; 024 import java.io.InputStreamReader; 025 import java.net.InetSocketAddress; 026 import java.util.ArrayList; 027 import java.util.HashMap; 028 import java.util.List; 029 import java.util.Map; 030 import java.util.Vector; 031 032 import org.apache.commons.cli.CommandLine; 033 import org.apache.commons.cli.GnuParser; 034 import org.apache.commons.cli.HelpFormatter; 035 import org.apache.commons.cli.Options; 036 import org.apache.commons.cli.ParseException; 037 import org.apache.commons.logging.Log; 038 import org.apache.commons.logging.LogFactory; 039 import org.apache.hadoop.classification.InterfaceAudience; 040 import org.apache.hadoop.classification.InterfaceStability; 041 import org.apache.hadoop.conf.Configuration; 042 import org.apache.hadoop.fs.FileStatus; 043 import org.apache.hadoop.fs.FileSystem; 044 import org.apache.hadoop.fs.Path; 045 import org.apache.hadoop.yarn.api.ApplicationConstants; 046 import org.apache.hadoop.yarn.api.ClientRMProtocol; 047 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; 048 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; 049 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; 050 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; 051 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; 052 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; 053 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; 054 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; 055 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; 056 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; 057 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; 058 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; 059 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; 060 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; 061 import org.apache.hadoop.yarn.api.records.ApplicationId; 062 import org.apache.hadoop.yarn.api.records.ApplicationReport; 063 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; 064 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; 065 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; 066 import org.apache.hadoop.yarn.api.records.LocalResource; 067 import org.apache.hadoop.yarn.api.records.LocalResourceType; 068 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; 069 import org.apache.hadoop.yarn.api.records.NodeReport; 070 import org.apache.hadoop.yarn.api.records.Priority; 071 import org.apache.hadoop.yarn.api.records.QueueACL; 072 import org.apache.hadoop.yarn.api.records.QueueInfo; 073 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; 074 import org.apache.hadoop.yarn.api.records.Resource; 075 import org.apache.hadoop.yarn.api.records.YarnApplicationState; 076 import org.apache.hadoop.yarn.conf.YarnConfiguration; 077 import org.apache.hadoop.yarn.exceptions.YarnRemoteException; 078 import org.apache.hadoop.yarn.ipc.YarnRPC; 079 import org.apache.hadoop.yarn.util.ConverterUtils; 080 import org.apache.hadoop.yarn.util.Records; 081 082 083 /** 084 * Client for Distributed Shell application submission to YARN. 085 * 086 * <p> The distributed shell client allows an application master to be launched that in turn would run 087 * the provided shell command on a set of containers. </p> 088 * 089 * <p>This client is meant to act as an example on how to write yarn-based applications. </p> 090 * 091 * <p> To submit an application, a client first needs to connect to the <code>ResourceManager</code> 092 * aka ApplicationsManager or ASM via the {@link ClientRMProtocol}. The {@link ClientRMProtocol} 093 * provides a way for the client to get access to cluster information and to request for a 094 * new {@link ApplicationId}. <p> 095 * 096 * <p> For the actual job submission, the client first has to create an {@link ApplicationSubmissionContext}. 097 * The {@link ApplicationSubmissionContext} defines the application details such as {@link ApplicationId} 098 * and application name, the priority assigned to the application and the queue 099 * to which this application needs to be assigned. In addition to this, the {@link ApplicationSubmissionContext} 100 * also defines the {@link ContainerLaunchContext} which describes the <code>Container</code> with which 101 * the {@link ApplicationMaster} is launched. </p> 102 * 103 * <p> The {@link ContainerLaunchContext} in this scenario defines the resources to be allocated for the 104 * {@link ApplicationMaster}'s container, the local resources (jars, configuration files) to be made available 105 * and the environment to be set for the {@link ApplicationMaster} and the commands to be executed to run the 106 * {@link ApplicationMaster}. <p> 107 * 108 * <p> Using the {@link ApplicationSubmissionContext}, the client submits the application to the 109 * <code>ResourceManager</code> and then monitors the application by requesting the <code>ResourceManager</code> 110 * for an {@link ApplicationReport} at regular time intervals. In case of the application taking too long, the client 111 * kills the application by submitting a {@link KillApplicationRequest} to the <code>ResourceManager</code>. </p> 112 * 113 */ 114 @InterfaceAudience.Public 115 @InterfaceStability.Unstable 116 public class Client { 117 118 private static final Log LOG = LogFactory.getLog(Client.class); 119 120 // Configuration 121 private Configuration conf; 122 123 // RPC to communicate to RM 124 private YarnRPC rpc; 125 126 // Handle to talk to the Resource Manager/Applications Manager 127 private ClientRMProtocol applicationsManager; 128 129 // Application master specific info to register a new Application with RM/ASM 130 private String appName = ""; 131 // App master priority 132 private int amPriority = 0; 133 // Queue for App master 134 private String amQueue = ""; 135 // Amt. of memory resource to request for to run the App Master 136 private int amMemory = 10; 137 138 // Application master jar file 139 private String appMasterJar = ""; 140 // Main class to invoke application master 141 private String appMasterMainClass = ""; 142 143 // Shell command to be executed 144 private String shellCommand = ""; 145 // Location of shell script 146 private String shellScriptPath = ""; 147 // Args to be passed to the shell command 148 private String shellArgs = ""; 149 // Env variables to be setup for the shell command 150 private Map<String, String> shellEnv = new HashMap<String, String>(); 151 // Shell Command Container priority 152 private int shellCmdPriority = 0; 153 154 // Amt of memory to request for container in which shell script will be executed 155 private int containerMemory = 10; 156 // No. of containers in which the shell script needs to be executed 157 private int numContainers = 1; 158 159 // log4j.properties file 160 // if available, add to local resources and set into classpath 161 private String log4jPropFile = ""; 162 163 // Start time for client 164 private final long clientStartTime = System.currentTimeMillis(); 165 // Timeout threshold for client. Kill app after time interval expires. 166 private long clientTimeout = 600000; 167 168 // Debug flag 169 boolean debugFlag = false; 170 171 /** 172 * @param args Command line arguments 173 */ 174 public static void main(String[] args) { 175 boolean result = false; 176 try { 177 Client client = new Client(); 178 LOG.info("Initializing Client"); 179 boolean doRun = client.init(args); 180 if (!doRun) { 181 System.exit(0); 182 } 183 result = client.run(); 184 } catch (Throwable t) { 185 LOG.fatal("Error running CLient", t); 186 System.exit(1); 187 } 188 if (result) { 189 LOG.info("Application completed successfully"); 190 System.exit(0); 191 } 192 LOG.error("Application failed to complete successfully"); 193 System.exit(2); 194 } 195 196 /** 197 */ 198 public Client(Configuration conf) throws Exception { 199 // Set up the configuration and RPC 200 this.conf = conf; 201 rpc = YarnRPC.create(conf); 202 } 203 204 /** 205 */ 206 public Client() throws Exception { 207 this(new Configuration()); 208 } 209 210 /** 211 * Helper function to print out usage 212 * @param opts Parsed command line options 213 */ 214 private void printUsage(Options opts) { 215 new HelpFormatter().printHelp("Client", opts); 216 } 217 218 /** 219 * Parse command line options 220 * @param args Parsed command line options 221 * @return Whether the init was successful to run the client 222 * @throws ParseException 223 */ 224 public boolean init(String[] args) throws ParseException { 225 226 Options opts = new Options(); 227 opts.addOption("appname", true, "Application Name. Default value - DistributedShell"); 228 opts.addOption("priority", true, "Application Priority. Default 0"); 229 opts.addOption("queue", true, "RM Queue in which this application is to be submitted"); 230 opts.addOption("timeout", true, "Application timeout in milliseconds"); 231 opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master"); 232 opts.addOption("jar", true, "Jar file containing the application master"); 233 opts.addOption("class", true, "Main class to be run for the Application Master."); 234 opts.addOption("shell_command", true, "Shell command to be executed by the Application Master"); 235 opts.addOption("shell_script", true, "Location of the shell script to be executed"); 236 opts.addOption("shell_args", true, "Command line args for the shell script"); 237 opts.addOption("shell_env", true, "Environment for shell script. Specified as env_key=env_val pairs"); 238 opts.addOption("shell_cmd_priority", true, "Priority for the shell command containers"); 239 opts.addOption("container_memory", true, "Amount of memory in MB to be requested to run the shell command"); 240 opts.addOption("num_containers", true, "No. of containers on which the shell command needs to be executed"); 241 opts.addOption("log_properties", true, "log4j.properties file"); 242 opts.addOption("debug", false, "Dump out debug information"); 243 opts.addOption("help", false, "Print usage"); 244 CommandLine cliParser = new GnuParser().parse(opts, args); 245 246 if (args.length == 0) { 247 printUsage(opts); 248 throw new IllegalArgumentException("No args specified for client to initialize"); 249 } 250 251 if (cliParser.hasOption("help")) { 252 printUsage(opts); 253 return false; 254 } 255 256 if (cliParser.hasOption("debug")) { 257 debugFlag = true; 258 259 } 260 261 appName = cliParser.getOptionValue("appname", "DistributedShell"); 262 amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0")); 263 amQueue = cliParser.getOptionValue("queue", "default"); 264 amMemory = Integer.parseInt(cliParser.getOptionValue("master_memory", "10")); 265 266 if (amMemory < 0) { 267 throw new IllegalArgumentException("Invalid memory specified for application master, exiting." 268 + " Specified memory=" + amMemory); 269 } 270 271 if (!cliParser.hasOption("jar")) { 272 throw new IllegalArgumentException("No jar file specified for application master"); 273 } 274 275 appMasterJar = cliParser.getOptionValue("jar"); 276 appMasterMainClass = cliParser.getOptionValue("class", 277 "org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster"); 278 279 if (!cliParser.hasOption("shell_command")) { 280 throw new IllegalArgumentException("No shell command specified to be executed by application master"); 281 } 282 shellCommand = cliParser.getOptionValue("shell_command"); 283 284 if (cliParser.hasOption("shell_script")) { 285 shellScriptPath = cliParser.getOptionValue("shell_script"); 286 } 287 if (cliParser.hasOption("shell_args")) { 288 shellArgs = cliParser.getOptionValue("shell_args"); 289 } 290 if (cliParser.hasOption("shell_env")) { 291 String envs[] = cliParser.getOptionValues("shell_env"); 292 for (String env : envs) { 293 env = env.trim(); 294 int index = env.indexOf('='); 295 if (index == -1) { 296 shellEnv.put(env, ""); 297 continue; 298 } 299 String key = env.substring(0, index); 300 String val = ""; 301 if (index < (env.length()-1)) { 302 val = env.substring(index+1); 303 } 304 shellEnv.put(key, val); 305 } 306 } 307 shellCmdPriority = Integer.parseInt(cliParser.getOptionValue("shell_cmd_priority", "0")); 308 309 containerMemory = Integer.parseInt(cliParser.getOptionValue("container_memory", "10")); 310 numContainers = Integer.parseInt(cliParser.getOptionValue("num_containers", "1")); 311 312 if (containerMemory < 0 || numContainers < 1) { 313 throw new IllegalArgumentException("Invalid no. of containers or container memory specified, exiting." 314 + " Specified containerMemory=" + containerMemory 315 + ", numContainer=" + numContainers); 316 } 317 318 clientTimeout = Integer.parseInt(cliParser.getOptionValue("timeout", "600000")); 319 320 log4jPropFile = cliParser.getOptionValue("log_properties", ""); 321 322 return true; 323 } 324 325 /** 326 * Main run function for the client 327 * @return true if application completed successfully 328 * @throws IOException 329 */ 330 public boolean run() throws IOException { 331 LOG.info("Starting Client"); 332 333 // Connect to ResourceManager 334 connectToASM(); 335 assert(applicationsManager != null); 336 337 // Use ClientRMProtocol handle to general cluster information 338 GetClusterMetricsRequest clusterMetricsReq = Records.newRecord(GetClusterMetricsRequest.class); 339 GetClusterMetricsResponse clusterMetricsResp = applicationsManager.getClusterMetrics(clusterMetricsReq); 340 LOG.info("Got Cluster metric info from ASM" 341 + ", numNodeManagers=" + clusterMetricsResp.getClusterMetrics().getNumNodeManagers()); 342 343 GetClusterNodesRequest clusterNodesReq = Records.newRecord(GetClusterNodesRequest.class); 344 GetClusterNodesResponse clusterNodesResp = applicationsManager.getClusterNodes(clusterNodesReq); 345 LOG.info("Got Cluster node info from ASM"); 346 for (NodeReport node : clusterNodesResp.getNodeReports()) { 347 LOG.info("Got node report from ASM for" 348 + ", nodeId=" + node.getNodeId() 349 + ", nodeAddress" + node.getHttpAddress() 350 + ", nodeRackName" + node.getRackName() 351 + ", nodeNumContainers" + node.getNumContainers() 352 + ", nodeHealthStatus" + node.getNodeHealthStatus()); 353 } 354 355 GetQueueInfoRequest queueInfoReq = Records.newRecord(GetQueueInfoRequest.class); 356 queueInfoReq.setQueueName(this.amQueue); 357 GetQueueInfoResponse queueInfoResp = applicationsManager.getQueueInfo(queueInfoReq); 358 QueueInfo queueInfo = queueInfoResp.getQueueInfo(); 359 LOG.info("Queue info" 360 + ", queueName=" + queueInfo.getQueueName() 361 + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity() 362 + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity() 363 + ", queueApplicationCount=" + queueInfo.getApplications().size() 364 + ", queueChildQueueCount=" + queueInfo.getChildQueues().size()); 365 366 GetQueueUserAclsInfoRequest queueUserAclsReq = Records.newRecord(GetQueueUserAclsInfoRequest.class); 367 GetQueueUserAclsInfoResponse queueUserAclsResp = applicationsManager.getQueueUserAcls(queueUserAclsReq); 368 List<QueueUserACLInfo> listAclInfo = queueUserAclsResp.getUserAclsInfoList(); 369 for (QueueUserACLInfo aclInfo : listAclInfo) { 370 for (QueueACL userAcl : aclInfo.getUserAcls()) { 371 LOG.info("User ACL Info for Queue" 372 + ", queueName=" + aclInfo.getQueueName() 373 + ", userAcl=" + userAcl.name()); 374 } 375 } 376 377 // Get a new application id 378 GetNewApplicationResponse newApp = getApplication(); 379 ApplicationId appId = newApp.getApplicationId(); 380 381 // TODO get min/max resource capabilities from RM and change memory ask if needed 382 // If we do not have min/max, we may not be able to correctly request 383 // the required resources from the RM for the app master 384 // Memory ask has to be a multiple of min and less than max. 385 // Dump out information about cluster capability as seen by the resource manager 386 int minMem = newApp.getMinimumResourceCapability().getMemory(); 387 int maxMem = newApp.getMaximumResourceCapability().getMemory(); 388 LOG.info("Min mem capabililty of resources in this cluster " + minMem); 389 LOG.info("Max mem capabililty of resources in this cluster " + maxMem); 390 391 // A resource ask has to be atleast the minimum of the capability of the cluster, the value has to be 392 // a multiple of the min value and cannot exceed the max. 393 // If it is not an exact multiple of min, the RM will allocate to the nearest multiple of min 394 if (amMemory < minMem) { 395 LOG.info("AM memory specified below min threshold of cluster. Using min value." 396 + ", specified=" + amMemory 397 + ", min=" + minMem); 398 amMemory = minMem; 399 } 400 else if (amMemory > maxMem) { 401 LOG.info("AM memory specified above max threshold of cluster. Using max value." 402 + ", specified=" + amMemory 403 + ", max=" + maxMem); 404 amMemory = maxMem; 405 } 406 407 // Create launch context for app master 408 LOG.info("Setting up application submission context for ASM"); 409 ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class); 410 411 // set the application id 412 appContext.setApplicationId(appId); 413 // set the application name 414 appContext.setApplicationName(appName); 415 416 // Set up the container launch context for the application master 417 ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class); 418 419 // set local resources for the application master 420 // local files or archives as needed 421 // In this scenario, the jar file for the application master is part of the local resources 422 Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); 423 424 LOG.info("Copy App Master jar from local filesystem and add to local environment"); 425 // Copy the application master jar to the filesystem 426 // Create a local resource to point to the destination jar path 427 FileSystem fs = FileSystem.get(conf); 428 Path src = new Path(appMasterJar); 429 String pathSuffix = appName + "/" + appId.getId() + "/AppMaster.jar"; 430 Path dst = new Path(fs.getHomeDirectory(), pathSuffix); 431 fs.copyFromLocalFile(false, true, src, dst); 432 FileStatus destStatus = fs.getFileStatus(dst); 433 LocalResource amJarRsrc = Records.newRecord(LocalResource.class); 434 435 // Set the type of resource - file or archive 436 // archives are untarred at destination 437 // we don't need the jar file to be untarred for now 438 amJarRsrc.setType(LocalResourceType.FILE); 439 // Set visibility of the resource 440 // Setting to most private option 441 amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION); 442 // Set the resource to be copied over 443 amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst)); 444 // Set timestamp and length of file so that the framework 445 // can do basic sanity checks for the local resource 446 // after it has been copied over to ensure it is the same 447 // resource the client intended to use with the application 448 amJarRsrc.setTimestamp(destStatus.getModificationTime()); 449 amJarRsrc.setSize(destStatus.getLen()); 450 localResources.put("AppMaster.jar", amJarRsrc); 451 452 // Set the log4j properties if needed 453 if (!log4jPropFile.isEmpty()) { 454 Path log4jSrc = new Path(log4jPropFile); 455 Path log4jDst = new Path(fs.getHomeDirectory(), "log4j.props"); 456 fs.copyFromLocalFile(false, true, log4jSrc, log4jDst); 457 FileStatus log4jFileStatus = fs.getFileStatus(log4jDst); 458 LocalResource log4jRsrc = Records.newRecord(LocalResource.class); 459 log4jRsrc.setType(LocalResourceType.FILE); 460 log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION); 461 log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri())); 462 log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime()); 463 log4jRsrc.setSize(log4jFileStatus.getLen()); 464 localResources.put("log4j.properties", log4jRsrc); 465 } 466 467 // The shell script has to be made available on the final container(s) 468 // where it will be executed. 469 // To do this, we need to first copy into the filesystem that is visible 470 // to the yarn framework. 471 // We do not need to set this as a local resource for the application 472 // master as the application master does not need it. 473 String hdfsShellScriptLocation = ""; 474 long hdfsShellScriptLen = 0; 475 long hdfsShellScriptTimestamp = 0; 476 if (!shellScriptPath.isEmpty()) { 477 Path shellSrc = new Path(shellScriptPath); 478 String shellPathSuffix = appName + "/" + appId.getId() + "/ExecShellScript.sh"; 479 Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix); 480 fs.copyFromLocalFile(false, true, shellSrc, shellDst); 481 hdfsShellScriptLocation = shellDst.toUri().toString(); 482 FileStatus shellFileStatus = fs.getFileStatus(shellDst); 483 hdfsShellScriptLen = shellFileStatus.getLen(); 484 hdfsShellScriptTimestamp = shellFileStatus.getModificationTime(); 485 } 486 487 // Set local resource info into app master container launch context 488 amContainer.setLocalResources(localResources); 489 490 // Set the necessary security tokens as needed 491 //amContainer.setContainerTokens(containerToken); 492 493 // Set the env variables to be setup in the env where the application master will be run 494 LOG.info("Set the environment for the application master"); 495 Map<String, String> env = new HashMap<String, String>(); 496 497 // put location of shell script into env 498 // using the env info, the application master will create the correct local resource for the 499 // eventual containers that will be launched to execute the shell scripts 500 env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation); 501 env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp)); 502 env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen)); 503 504 // Add AppMaster.jar location to classpath 505 // At some point we should not be required to add 506 // the hadoop specific classpaths to the env. 507 // It should be provided out of the box. 508 // For now setting all required classpaths including 509 // the classpath to "." for the application jar 510 StringBuilder classPathEnv = new StringBuilder("${CLASSPATH}:./*"); 511 for (String c : conf.getStrings( 512 YarnConfiguration.YARN_APPLICATION_CLASSPATH, 513 YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) { 514 classPathEnv.append(':'); 515 classPathEnv.append(c.trim()); 516 } 517 classPathEnv.append(":./log4j.properties"); 518 519 // add the runtime classpath needed for tests to work 520 String testRuntimeClassPath = Client.getTestRuntimeClasspath(); 521 classPathEnv.append(':'); 522 classPathEnv.append(testRuntimeClassPath); 523 524 env.put("CLASSPATH", classPathEnv.toString()); 525 526 amContainer.setEnvironment(env); 527 528 // Set the necessary command to execute the application master 529 Vector<CharSequence> vargs = new Vector<CharSequence>(30); 530 531 // Set java executable command 532 LOG.info("Setting up app master command"); 533 vargs.add("${JAVA_HOME}" + "/bin/java"); 534 // Set Xmx based on am memory size 535 vargs.add("-Xmx" + amMemory + "m"); 536 // Set class name 537 vargs.add(appMasterMainClass); 538 // Set params for Application Master 539 vargs.add("--container_memory " + String.valueOf(containerMemory)); 540 vargs.add("--num_containers " + String.valueOf(numContainers)); 541 vargs.add("--priority " + String.valueOf(shellCmdPriority)); 542 if (!shellCommand.isEmpty()) { 543 vargs.add("--shell_command " + shellCommand + ""); 544 } 545 if (!shellArgs.isEmpty()) { 546 vargs.add("--shell_args " + shellArgs + ""); 547 } 548 for (Map.Entry<String, String> entry : shellEnv.entrySet()) { 549 vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue()); 550 } 551 if (debugFlag) { 552 vargs.add("--debug"); 553 } 554 555 vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout"); 556 vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr"); 557 558 // Get final commmand 559 StringBuilder command = new StringBuilder(); 560 for (CharSequence str : vargs) { 561 command.append(str).append(" "); 562 } 563 564 LOG.info("Completed setting up app master command " + command.toString()); 565 List<String> commands = new ArrayList<String>(); 566 commands.add(command.toString()); 567 amContainer.setCommands(commands); 568 569 // Set up resource type requirements 570 // For now, only memory is supported so we set memory requirements 571 Resource capability = Records.newRecord(Resource.class); 572 capability.setMemory(amMemory); 573 amContainer.setResource(capability); 574 575 // Service data is a binary blob that can be passed to the application 576 // Not needed in this scenario 577 // amContainer.setServiceData(serviceData); 578 579 // The following are not required for launching an application master 580 // amContainer.setContainerId(containerId); 581 582 appContext.setAMContainerSpec(amContainer); 583 584 // Set the priority for the application master 585 Priority pri = Records.newRecord(Priority.class); 586 // TODO - what is the range for priority? how to decide? 587 pri.setPriority(amPriority); 588 appContext.setPriority(pri); 589 590 // Set the queue to which this application is to be submitted in the RM 591 appContext.setQueue(amQueue); 592 593 // Create the request to send to the applications manager 594 SubmitApplicationRequest appRequest = Records.newRecord(SubmitApplicationRequest.class); 595 appRequest.setApplicationSubmissionContext(appContext); 596 597 // Submit the application to the applications manager 598 // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest); 599 // Ignore the response as either a valid response object is returned on success 600 // or an exception thrown to denote some form of a failure 601 LOG.info("Submitting application to ASM"); 602 applicationsManager.submitApplication(appRequest); 603 604 // TODO 605 // Try submitting the same request again 606 // app submission failure? 607 608 // Monitor the application 609 return monitorApplication(appId); 610 611 } 612 613 /** 614 * Monitor the submitted application for completion. 615 * Kill application if time expires. 616 * @param appId Application Id of application to be monitored 617 * @return true if application completed successfully 618 * @throws YarnRemoteException 619 */ 620 private boolean monitorApplication(ApplicationId appId) throws YarnRemoteException { 621 622 while (true) { 623 624 // Check app status every 1 second. 625 try { 626 Thread.sleep(1000); 627 } catch (InterruptedException e) { 628 LOG.debug("Thread sleep in monitoring loop interrupted"); 629 } 630 631 // Get application report for the appId we are interested in 632 GetApplicationReportRequest reportRequest = Records.newRecord(GetApplicationReportRequest.class); 633 reportRequest.setApplicationId(appId); 634 GetApplicationReportResponse reportResponse = applicationsManager.getApplicationReport(reportRequest); 635 ApplicationReport report = reportResponse.getApplicationReport(); 636 637 LOG.info("Got application report from ASM for" 638 + ", appId=" + appId.getId() 639 + ", clientToken=" + report.getClientToken() 640 + ", appDiagnostics=" + report.getDiagnostics() 641 + ", appMasterHost=" + report.getHost() 642 + ", appQueue=" + report.getQueue() 643 + ", appMasterRpcPort=" + report.getRpcPort() 644 + ", appStartTime=" + report.getStartTime() 645 + ", yarnAppState=" + report.getYarnApplicationState().toString() 646 + ", distributedFinalState=" + report.getFinalApplicationStatus().toString() 647 + ", appTrackingUrl=" + report.getTrackingUrl() 648 + ", appUser=" + report.getUser()); 649 650 YarnApplicationState state = report.getYarnApplicationState(); 651 FinalApplicationStatus dsStatus = report.getFinalApplicationStatus(); 652 if (YarnApplicationState.FINISHED == state) { 653 if (FinalApplicationStatus.SUCCEEDED == dsStatus) { 654 LOG.info("Application has completed successfully. Breaking monitoring loop"); 655 return true; 656 } 657 else { 658 LOG.info("Application did finished unsuccessfully." 659 + " YarnState=" + state.toString() + ", DSFinalStatus=" + dsStatus.toString() 660 + ". Breaking monitoring loop"); 661 return false; 662 } 663 } 664 else if (YarnApplicationState.KILLED == state 665 || YarnApplicationState.FAILED == state) { 666 LOG.info("Application did not finish." 667 + " YarnState=" + state.toString() + ", DSFinalStatus=" + dsStatus.toString() 668 + ". Breaking monitoring loop"); 669 return false; 670 } 671 672 if (System.currentTimeMillis() > (clientStartTime + clientTimeout)) { 673 LOG.info("Reached client specified timeout for application. Killing application"); 674 killApplication(appId); 675 return false; 676 } 677 } 678 679 } 680 681 /** 682 * Kill a submitted application by sending a call to the ASM 683 * @param appId Application Id to be killed. 684 * @throws YarnRemoteException 685 */ 686 private void killApplication(ApplicationId appId) throws YarnRemoteException { 687 KillApplicationRequest request = Records.newRecord(KillApplicationRequest.class); 688 // TODO clarify whether multiple jobs with the same app id can be submitted and be running at 689 // the same time. 690 // If yes, can we kill a particular attempt only? 691 request.setApplicationId(appId); 692 // KillApplicationResponse response = applicationsManager.forceKillApplication(request); 693 // Response can be ignored as it is non-null on success or 694 // throws an exception in case of failures 695 applicationsManager.forceKillApplication(request); 696 } 697 698 /** 699 * Connect to the Resource Manager/Applications Manager 700 * @return Handle to communicate with the ASM 701 * @throws IOException 702 */ 703 private void connectToASM() throws IOException { 704 705 /* 706 UserGroupInformation user = UserGroupInformation.getCurrentUser(); 707 applicationsManager = user.doAs(new PrivilegedAction<ClientRMProtocol>() { 708 public ClientRMProtocol run() { 709 InetSocketAddress rmAddress = NetUtils.createSocketAddr(conf.get( 710 YarnConfiguration.RM_SCHEDULER_ADDRESS, 711 YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS)); 712 LOG.info("Connecting to ResourceManager at " + rmAddress); 713 Configuration appsManagerServerConf = new Configuration(conf); 714 appsManagerServerConf.setClass(YarnConfiguration.YARN_SECURITY_INFO, 715 ClientRMSecurityInfo.class, SecurityInfo.class); 716 ClientRMProtocol asm = ((ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class, rmAddress, appsManagerServerConf)); 717 return asm; 718 } 719 }); 720 */ 721 YarnConfiguration yarnConf = new YarnConfiguration(conf); 722 InetSocketAddress rmAddress = yarnConf.getSocketAddr( 723 YarnConfiguration.RM_ADDRESS, 724 YarnConfiguration.DEFAULT_RM_ADDRESS, 725 YarnConfiguration.DEFAULT_RM_PORT); 726 LOG.info("Connecting to ResourceManager at " + rmAddress); 727 applicationsManager = ((ClientRMProtocol) rpc.getProxy( 728 ClientRMProtocol.class, rmAddress, conf)); 729 } 730 731 /** 732 * Get a new application from the ASM 733 * @return New Application 734 * @throws YarnRemoteException 735 */ 736 private GetNewApplicationResponse getApplication() throws YarnRemoteException { 737 GetNewApplicationRequest request = Records.newRecord(GetNewApplicationRequest.class); 738 GetNewApplicationResponse response = applicationsManager.getNewApplication(request); 739 LOG.info("Got new application id=" + response.getApplicationId()); 740 return response; 741 } 742 743 private static String getTestRuntimeClasspath() { 744 745 InputStream classpathFileStream = null; 746 BufferedReader reader = null; 747 String envClassPath = ""; 748 749 LOG.info("Trying to generate classpath for app master from current thread's classpath"); 750 try { 751 752 // Create classpath from generated classpath 753 // Check maven ppom.xml for generated classpath info 754 // Works if compile time env is same as runtime. Mainly tests. 755 ClassLoader thisClassLoader = 756 Thread.currentThread().getContextClassLoader(); 757 String generatedClasspathFile = "yarn-apps-ds-generated-classpath"; 758 classpathFileStream = 759 thisClassLoader.getResourceAsStream(generatedClasspathFile); 760 if (classpathFileStream == null) { 761 LOG.info("Could not classpath resource from class loader"); 762 return envClassPath; 763 } 764 LOG.info("Readable bytes from stream=" + classpathFileStream.available()); 765 reader = new BufferedReader(new InputStreamReader(classpathFileStream)); 766 String cp = reader.readLine(); 767 if (cp != null) { 768 envClassPath += cp.trim() + ":"; 769 } 770 // Put the file itself on classpath for tasks. 771 envClassPath += thisClassLoader.getResource(generatedClasspathFile).getFile(); 772 } catch (IOException e) { 773 LOG.info("Could not find the necessary resource to generate class path for tests. Error=" + e.getMessage()); 774 } 775 776 try { 777 if (classpathFileStream != null) { 778 classpathFileStream.close(); 779 } 780 if (reader != null) { 781 reader.close(); 782 } 783 } catch (IOException e) { 784 LOG.info("Failed to close class path file stream or reader. Error=" + e.getMessage()); 785 } 786 return envClassPath; 787 } 788 789 }