@inproceedings {1913,
  title = {An Empirical Analysis of Build Failures in the Continuous Integration Workflows of Java-Based Open-Source Software},
  year = {2017},
  note = {"empirical study of CI build failures in 14 Java-based OSS projects. We extract and analyze data from publicly available GitHub repositories and Travis-CI build logs"},
  month = {05/2017},
  pages = {345-355},
  abstract = {Continuous Integration (CI) has become a common practice in both industrial and open-source software development. While CI has evidently improved aspects of the software development process, errors during CI builds pose a threat to development efficiency. As an increasing amount of time goes into fixing such errors, failing builds can significantly impair the development process and become very costly. We perform an in-depth analysis of build failures in CI environments. Our approach links repository commits to data of the corresponding CI builds. Using data from 14 open-source Java projects, we first identify 14 common error categories. Besides test failures, which are by far the most common error category (up to >80\% per project), we also identify noisy build data, e.g., induced by transient Git interaction errors or general infrastructure flakiness. Second, we analyze which factors impact the build results, taking into account both general process metrics and specific CI metrics. Our results indicate that process metrics have a significant impact on the build outcome in 8 of the 14 projects on average, but the strongest influencing factor across all projects is the overall stability of the recent build history. For 10 projects, more than 50\% (up to 80\%) of all failed builds follow a previous build failure. Moreover, the fail ratio of the last k = 10 builds has a significant impact on build results for all projects in our dataset.},
  keywords = {build errors, continuous integration, correlation analysis, msr},
  author = {Rausch, Thomas and Hummer, Waldemar and Leitner, Philipp and Schulte, Stefan}
}

@inproceedings {1908,
  title = {How Open Source Projects use Static Code Analysis Tools in Continuous Integration Pipelines},
  year = {2017},
  note = {Data: the paper studies the use of ASCATs (Automated Static Code Analysis Tools) within the CI pipelines of 20 popular Java open-source projects hosted on GitHub and using Travis CI to support CI activities},
  month = {05/2017},
  pages = {334-344},
  abstract = {Static analysis tools are often used by software developers to enable early detection of potential faults, vulnerabilities, and code smells, or to assess the adherence of source code to coding standards and guidelines. Their adoption within Continuous Integration (CI) pipelines has also been advocated by researchers and practitioners. This paper studies the usage of static analysis tools in 20 Java open-source projects hosted on GitHub that use Travis CI as their continuous integration infrastructure. Specifically, we investigate (i) which tools are being used and how they are configured for CI, (ii) what types of issues make the build fail or raise warnings, and (iii) whether, how, and after how long broken builds and warnings are resolved. Results indicate that in the analyzed projects, build breakages due to static analysis tools are mainly related to adherence to coding standards, and there is also some attention to missing licenses.
Build failures related to tools identifying potential bugs or vulnerabilities occur less frequently, and in some cases such tools are activated in a {\textquotedblleft}softer{\textquotedblright} mode, without making the build fail. Also, the study reveals that build breakages due to static analysis tools are quickly fixed by actually solving the problem, rather than by disabling the warning, and are often properly documented.},
  keywords = {continuous integration, empirical study, static analysis},
  doi = {10.1109/MSR.2017.2},
  author = {Zampetti, Fiorella and Scalabrino, Simone and Oliveto, Rocco and Canfora, Gerardo and Di Penta, Massimiliano}
}